author | Dave Airlie <airlied@redhat.com> | 2011-09-20 09:35:22 +0100 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2011-09-20 09:35:22 +0100 |
commit | b2d108ba333cdff80d9e7645d7697cbb6bb0fc29 (patch) | |
tree | f62b2464a35e4676b5e3e9ef340758a17d0b80f8 /kernel/events/core.c | |
parent | fcf4de5acf09889e3f0c131ebe385c983006d71b (diff) | |
parent | a0d9a8feb928465f3cb525a19e5fafd06ef66ced (diff) | |
Merge branch 'drm-nouveau-next' of git://git.freedesktop.org/git/nouveau/linux-2.6 into drm-next
* 'drm-nouveau-next' of git://git.freedesktop.org/git/nouveau/linux-2.6: (353 commits)
drm/nouveau: remove allocations from gart populate() hook
drm/nvc0/fb: slightly improve PMFB intr handling, move out of nvc0_graph.c
drm/nvc0/fifo: avoid touching missing subfifos
drm/nvd9/disp: bail out of mode_set_base if no fb bound to crtc
drm/nvd9/disp: stub some more api hooks so we don't oops on resume
drm/nouveau: fix printk typo in ioremap failure path
drm/nvc0/pm: minor clock readback fixes
drm/nv40/pm: execute memory reset script from vbios
drm/nv50/gr: refactor initialisation
drm/nouveau: if requested, try harder at disabling sysmem pushbufs
drm/nv50/gr: enable ctxprog xfer only when we need it to save power
drm/nouveau/dp: add support for displayport table 0x30
drm/nouveau/dp: return master dp table pointer too when looking up encoder
drm/nouveau/bios: simplify U/d table hash matching func to just match
drm/nouveau/dp: preserve non-pattern bits in DP_TRAINING_PATTERN_SET
drm/nvc0/gr: remove MODULE_FIRMWARE() lines
drm/nouveau/dp: use alternate lane mask for nvaf
drm/nouveau/dp: link rate scripts are selected with a comparison table
drm/nv40/pm: write nv40-specific reclocking routines
drm/nv40/pm: parse geometric delta clock from vbios
...
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r-- | kernel/events/core.c | 67 |
1 file changed, 55 insertions, 12 deletions
```diff
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1..0f857782d06 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/*
+	 * next is NULL when called from perf_event_enable_on_exec()
+	 * that will systematically cause a cgroup_switch()
+	 */
+	if (next)
+		cgrp2 = perf_cgroup_from_task(next);
+
+	/*
+	 * only schedule out current cgroup events if we know
+	 * that we are switching to a different cgroup. Otherwise,
+	 * do no touch the cgroup events.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/* prev can never be NULL */
+	cgrp2 = perf_cgroup_from_task(prev);
+
+	/*
+	 * only need to schedule in cgroup events if we are changing
+	 * cgroup during ctxsw. Cgroup events were not scheduled
+	 * out of ctxsw out if that was not the case.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_out(task);
+		perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_in(task);
+		perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * ctxswin cgroup events which are already scheduled
 	 * in.
 	 */
-	perf_cgroup_sched_out(current);
+	perf_cgroup_sched_out(current, NULL);
 
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-				u64 *running,
-				u64 *enabled)
+				u64 *enabled,
+				u64 *running)
 {
 	u64 now, ctx_time;
 
```
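The kernel/events/core.c portion of this merge makes perf's cgroup context switch conditional: perf_cgroup_switch() is only invoked when the outgoing and incoming tasks belong to different perf cgroups, and a NULL next (the enable-on-exec path) always forces the switch. Below is a minimal, self-contained C sketch of that comparison; the types and helpers here (struct task, cgroup_switch, sched_out) are illustrative stand-ins, not the kernel's own structures.

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's task_struct / perf_cgroup. */
struct perf_cgroup { int id; };
struct task { const char *name; struct perf_cgroup *cgrp; };

/* Stand-in for perf_cgroup_switch(): the expensive full switch. */
static void cgroup_switch(struct task *t, const char *dir)
{
	printf("full cgroup switch (%s) for %s\n", dir, t->name);
}

/*
 * Mirrors the patched perf_cgroup_sched_out() logic: only pay for a
 * switch when prev and next are in different cgroups; next == NULL
 * (the enable-on-exec path) always forces the switch.
 */
static void sched_out(struct task *prev, struct task *next)
{
	struct perf_cgroup *c1 = prev->cgrp;
	struct perf_cgroup *c2 = next ? next->cgrp : NULL;

	if (c1 != c2)
		cgroup_switch(prev, "out");
}

int main(void)
{
	struct perf_cgroup a = { 1 }, b = { 2 };
	struct task t1 = { "t1", &a }, t2 = { "t2", &a }, t3 = { "t3", &b };

	sched_out(&t1, &t2);	/* same cgroup: no switch */
	sched_out(&t1, &t3);	/* different cgroup: switch */
	sched_out(&t1, NULL);	/* enable-on-exec style path: switch */
	return 0;
}
```

Run as written, only the second and third calls print a switch, which is why the patch threads prev/next through __perf_event_task_sched_in/out rather than switching unconditionally on every context switch.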