Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	82
1 file changed, 49 insertions, 33 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf1e8c..b782b7a79f0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)	{ }
@@ -72,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
 	return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+	return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-	return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -256,6 +268,12 @@ static void update_context_time(struct perf_event_context *ctx)
 	ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -269,7 +287,7 @@ static void update_event_times(struct perf_event *event)
 		return;
 
 	if (ctx->is_active)
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 	else
 		run_end = event->tstamp_stopped;
 
@@ -278,7 +296,7 @@ static void update_event_times(struct perf_event *event)
 	if (event->state == PERF_EVENT_STATE_INACTIVE)
 		run_end = event->tstamp_stopped;
 	else
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 
 	event->total_time_running = run_end - event->tstamp_running;
 }
@@ -534,6 +552,7 @@ event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
 	u64 delta;
 	/*
 	 * An event which could not be activated because of
@@ -545,7 +564,7 @@ event_sched_out(struct perf_event *event,
 	    && !event_filter_match(event)) {
 		delta = ctx->time - event->tstamp_stopped;
 		event->tstamp_running += delta;
-		event->tstamp_stopped = ctx->time;
+		event->tstamp_stopped = tstamp;
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -556,7 +575,7 @@ event_sched_out(struct perf_event *event,
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_stopped = tstamp;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -768,6 +787,8 @@ event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -784,9 +805,9 @@ event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	event->tstamp_running += tstamp - event->tstamp_stopped;
 
-	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+	event->shadow_ctx_time = tstamp - ctx->timestamp;
 
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
@@ -898,11 +919,13 @@ static int group_can_go_on(struct perf_event *event,
 static void add_event_to_ctx(struct perf_event *event,
 			       struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = ctx->time;
-	event->tstamp_running = ctx->time;
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_enabled = tstamp;
+	event->tstamp_running = tstamp;
+	event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -937,7 +960,7 @@ static void __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1042,14 +1065,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 					struct perf_event_context *ctx)
 {
 	struct perf_event *sub;
+	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = ctx->time - event->total_time_enabled;
+	event->tstamp_enabled = tstamp - event->total_time_enabled;
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-			sub->tstamp_enabled =
-				ctx->time - sub->total_time_enabled;
-		}
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 	}
 }
 
@@ -1082,7 +1104,7 @@ static void __perf_event_enable(void *info)
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1193,12 +1215,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
 	return 0;
 }
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
 			  struct perf_cpu_context *cpuctx,
 			  enum event_type_t event_type)
@@ -1435,7 +1451,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
@@ -1467,7 +1483,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1694,7 +1710,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		hwc = &event->hw;
@@ -3893,7 +3909,7 @@ static int perf_event_task_match(struct perf_event *event)
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm || event->attr.mmap ||
@@ -4030,7 +4046,7 @@ static int perf_event_comm_match(struct perf_event *event)
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm)
@@ -4178,7 +4194,7 @@ static int perf_event_mmap_match(struct perf_event *event,
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if ((!executable && event->attr.mmap_data) ||
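
Note: the definition of event_filter_match(), which the hunks above substitute for the open-coded cpu check, is not part of this excerpt. Judging only from the expression it replaces (event->cpu != -1 && event->cpu != smp_processor_id()), the helper presumably reduces to something like the sketch below; treat it as an illustration of the filter being centralized, not the exact definition in this tree.

/*
 * Sketch of the helper assumed by the hunks above: an event either has
 * no CPU binding (event->cpu == -1) or must be running on the CPU it
 * was bound to. This mirrors the open-coded check being replaced.
 */
static inline int event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}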