author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-09 21:17:43 +0200
committer  Ingo Molnar <mingo@elte.hu>                2011-05-28 18:01:14 +0200
commit     04dc2dbbfe1c6f81b996d4dab255da75f9efbb4a (patch)
tree       a99e0c849f61d5bf7f3d9777f0e5c4bf69c61d9a /kernel/events
parent     facc43071cc0d4821c176d7d34570714eb348df9 (diff)
perf: Remove task_ctx_sched_in()
Make task_ctx_sched_*() imply EVENT_ALL, since anything less will not
actually have scheduled the task in/out at all.
Since there is no call site that schedules in all of a task's events at
once (due to the interleave with the flexible cpuctx events), we can
remove task_ctx_sched_in().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.817893268@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
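
For readers not steeped in this code path: the "interleave" above refers to
the sched-in priority order that perf_event_context_sched_in() maintains.
Below is a condensed sketch of that ordering, simplified from the kernel
source of this era (locking, cgroup handling and perf_pmu_disable()/enable()
are elided; treat the body as an approximation, not the verbatim function):

/*
 * Desired priority order:
 *   cpu pinned (stay on the PMU), task pinned,
 *   cpu flexible, task flexible.
 */
static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (cpuctx->task_ctx == ctx)
		return;

	/* Make room by kicking out the CPU's flexible groups first. */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);	/* task pinned   */
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);	/* cpu flexible  */
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);	/* task flexible */

	cpuctx->task_ctx = ctx;
}

The task's pinned and flexible groups are scheduled in by two separate
ctx_sched_in() calls with a cpu_ctx_sched_in() wedged between them, so no
single call ever schedules in EVENT_ALL of a task context.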
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c | 26 ++++++--------------------
1 file changed, 6 insertions(+), 20 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d243af954dc..66b3dd80940 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1979,8 +1979,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 		perf_cgroup_sched_out(task);
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx,
-			       enum event_type_t event_type)
+static void task_ctx_sched_out(struct perf_event_context *ctx)
 {
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
@@ -1990,7 +1989,7 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	ctx_sched_out(ctx, cpuctx, event_type);
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 	cpuctx->task_ctx = NULL;
 }
 
@@ -2098,19 +2097,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 	ctx_sched_in(ctx, cpuctx, event_type, task);
 }
 
-static void task_ctx_sched_in(struct perf_event_context *ctx,
-			      enum event_type_t event_type)
-{
-	struct perf_cpu_context *cpuctx;
-
-	cpuctx = __get_cpu_context(ctx);
-	if (cpuctx->task_ctx == ctx)
-		return;
-
-	ctx_sched_in(ctx, cpuctx, event_type, NULL);
-	cpuctx->task_ctx = ctx;
-}
-
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
 					struct task_struct *task)
 {
@@ -2363,7 +2349,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
+		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
@@ -2371,7 +2357,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
 	if (ctx)
-		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
 
 done:
 	if (remove)
@@ -2435,7 +2421,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	perf_cgroup_sched_out(current);
 
 	raw_spin_lock(&ctx->lock);
-	task_ctx_sched_out(ctx, EVENT_ALL);
+	task_ctx_sched_out(ctx);
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		ret = event_enable_on_exec(event, ctx);
@@ -6794,7 +6780,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	raw_spin_lock(&child_ctx->lock);
-	task_ctx_sched_out(child_ctx, EVENT_ALL);
+	task_ctx_sched_out(child_ctx);
 	child->perf_event_ctxp[ctxn] = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
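
For convenience, here is what the surviving helper looks like with the patch
applied, reassembled from the first two hunks above. The lines between those
hunks (the early return when there is no task context) are not visible in
this diff; that part is reconstructed from the surrounding kernel source and
should be treated as an assumption of this sketch:

static void task_ctx_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* Not shown in the hunks above; reconstructed context. */
	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	/* A task context is only ever scheduled out in full. */
	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;
}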