author     David S. Miller <davem@davemloft.net>  2009-09-27 20:43:07 -0700
committer  David S. Miller <davem@davemloft.net>  2009-09-27 20:43:07 -0700
commit     01552f765cae873d0ea3cca1e64e41dfd62659e6 (patch)
tree       80887592fb5fb3dd848b06c44a2b0c76770843be /arch/sparc/kernel/perf_event.c
parent     7eebda60d57a0862a410f45122c73b8bbe6e260c (diff)
sparc64: Add initial perf event conflict resolution and checks.
Cribbed from powerpc code, as usual. :-)

Currently it is only used to validate that all counters have the same
user/kernel/hv attributes.

Signed-off-by: David S. Miller <davem@davemloft.net>
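For illustration only (this sketch is not part of the patch): the kind of event group the new check rejects, seen from user space. The leader counts user-only cycles; the sibling's exclude_kernel bit disagrees, so with this change the sibling's perf_event_open() on sparc64 is expected to fail with EINVAL (check_excludes() returns -EAGAIN internally, which __hw_perf_event_init() maps to -EINVAL). The raw-syscall wrapper below is the usual shim; __NR_perf_event_open assumes headers from after the perf_counter -> perf_event rename.

/* Hypothetical demo -- not part of this patch.  Opens a group leader
 * that counts user-only cycles, then tries to add a sibling whose
 * exclude bits disagree with the leader's.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr leader_attr, sibling_attr;
	long leader_fd, sibling_fd;

	memset(&leader_attr, 0, sizeof(leader_attr));
	leader_attr.type = PERF_TYPE_HARDWARE;
	leader_attr.size = sizeof(leader_attr);
	leader_attr.config = PERF_COUNT_HW_CPU_CYCLES;
	leader_attr.exclude_kernel = 1;		/* count user mode only */

	sibling_attr = leader_attr;
	sibling_attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	sibling_attr.exclude_kernel = 0;	/* disagrees with the leader */

	leader_fd = perf_event_open(&leader_attr, 0, -1, -1, 0);
	if (leader_fd < 0) {
		perror("leader open");
		return 1;
	}

	/* Grouped with the leader: the exclude bits differ, so this
	 * open should fail with EINVAL on sparc64 after this patch.
	 */
	sibling_fd = perf_event_open(&sibling_attr, 0, -1, (int)leader_fd, 0);
	if (sibling_fd < 0)
		perror("sibling open (expected to fail)");

	return 0;
}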
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--  arch/sparc/kernel/perf_event.c  82
1 file changed, 77 insertions(+), 5 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 9541b456c3e..91995249815 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -713,12 +713,66 @@ static void hw_perf_event_destroy(struct perf_event *event)
 	perf_event_release_pmc();
 }
 
+static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
+{
+	int eu = 0, ek = 0, eh = 0;
+	struct perf_event *event;
+	int i, n, first;
+
+	n = n_prev + n_new;
+	if (n <= 1)
+		return 0;
+
+	first = 1;
+	for (i = 0; i < n; i++) {
+		event = evts[i];
+		if (first) {
+			eu = event->attr.exclude_user;
+			ek = event->attr.exclude_kernel;
+			eh = event->attr.exclude_hv;
+			first = 0;
+		} else if (event->attr.exclude_user != eu ||
+			   event->attr.exclude_kernel != ek ||
+			   event->attr.exclude_hv != eh) {
+			return -EAGAIN;
+		}
+	}
+
+	return 0;
+}
+
+static int collect_events(struct perf_event *group, int max_count,
+			  struct perf_event *evts[], u64 *events)
+{
+	struct perf_event *event;
+	int n = 0;
+
+	if (!is_software_event(group)) {
+		if (n >= max_count)
+			return -1;
+		evts[n] = group;
+		events[n++] = group->hw.config;
+	}
+	list_for_each_entry(event, &group->sibling_list, group_entry) {
+		if (!is_software_event(event) &&
+		    event->state != PERF_EVENT_STATE_OFF) {
+			if (n >= max_count)
+				return -1;
+			evts[n] = event;
+			events[n++] = event->hw.config;
+		}
+	}
+	return n;
+}
+
 static int __hw_perf_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
+	struct perf_event *evts[MAX_HWEVENTS];
 	struct hw_perf_event *hwc = &event->hw;
 	const struct perf_event_map *pmap;
-	u64 enc;
+	u64 enc, events[MAX_HWEVENTS];
+	int n;
 
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
@@ -734,9 +788,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 	} else
 		return -EOPNOTSUPP;
 
-	perf_event_grab_pmc();
-	event->destroy = hw_perf_event_destroy;
-
 	/* We save the enable bits in the config_base. So to
 	 * turn off sampling just write 'config', and to enable
 	 * things write 'config | config_base'.
@@ -749,13 +800,34 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (!attr->exclude_hv)
 		hwc->config_base |= sparc_pmu->hv_bit;
 
+	enc = pmap->encoding;
+
+	n = 0;
+	if (event->group_leader != event) {
+		n = collect_events(event->group_leader,
+				   perf_max_events - 1,
+				   evts, events);
+		if (n < 0)
+			return -EINVAL;
+	}
+	events[n] = enc;
+	evts[n] = event;
+
+	if (check_excludes(evts, n, 1))
+		return -EINVAL;
+
+	/* Try to do all error checking before this point, as unwinding
+	 * state after grabbing the PMC is difficult.
+	 */
+	perf_event_grab_pmc();
+	event->destroy = hw_perf_event_destroy;
+
 	if (!hwc->sample_period) {
 		hwc->sample_period = MAX_PERIOD;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
-	enc = pmap->encoding;
 	if (pmap->pic_mask & PIC_UPPER) {
 		hwc->idx = PIC_UPPER_INDEX;
 		enc <<= sparc_pmu->upper_shift;
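
As an aside on the config/config_base comment retained above: a minimal standalone sketch of the enable-bit trick, using hypothetical model types and bit values rather than the kernel's. Enabling a counter writes config | config_base (the event encoding plus the user/kernel/hv enable bits); disabling writes the bare config.

/* Standalone model -- not kernel code.  'config' holds the event
 * encoding, 'config_base' the user/kernel/hv enable bits.
 */
#include <stdint.h>
#include <stdio.h>

struct model_hw_event {
	uint64_t config;	/* event encoding for the counter */
	uint64_t config_base;	/* enable bits: user/kernel/hv */
};

static uint64_t pcr_value(const struct model_hw_event *hwc, int enable)
{
	/* Enable: OR in the enable bits; disable: bare encoding. */
	return enable ? (hwc->config | hwc->config_base) : hwc->config;
}

int main(void)
{
	/* Hypothetical bit positions, for illustration only. */
	struct model_hw_event ev = {
		.config      = 0x3,	/* some event encoding */
		.config_base = 0x30,	/* e.g. user + kernel enable bits */
	};

	printf("enabled:  %#llx\n", (unsigned long long)pcr_value(&ev, 1));
	printf("disabled: %#llx\n", (unsigned long long)pcr_value(&ev, 0));
	return 0;
}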