author     Linus Torvalds <torvalds@linux-foundation.org>   2014-06-08 11:31:16 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-06-08 11:31:16 -0700
commit     3f17ea6dea8ba5668873afa54628a91aaa3fb1c0 (patch)
tree       afbeb2accd4c2199ddd705ae943995b143a0af02   /tools/perf/ui/hist.c
parent     1860e379875dfe7271c649058aeddffe5afd9d0d (diff)
parent     1a5700bc2d10cd379a795fd2bb377a190af5acd4 (diff)
Merge branch 'next' (accumulated 3.16 merge window patches) into master
Now that 3.15 is released, this merges the 'next' branch into 'master',
bringing us to the normal situation where my 'master' branch is the merge
window.

* accumulated work in next: (6809 commits)
  ufs: sb mutex merge + mutex_destroy
  powerpc: update comments for generic idle conversion
  cris: update comments for generic idle conversion
  idle: remove cpu_idle() forward declarations
  nbd: zero from and len fields in NBD_CMD_DISCONNECT.
  mm: convert some level-less printks to pr_*
  MAINTAINERS: adi-buildroot-devel is moderated
  MAINTAINERS: add linux-api for review of API/ABI changes
  mm/kmemleak-test.c: use pr_fmt for logging
  fs/dlm/debug_fs.c: replace seq_printf by seq_puts
  fs/dlm/lockspace.c: convert simple_str to kstr
  fs/dlm/config.c: convert simple_str to kstr
  mm: mark remap_file_pages() syscall as deprecated
  mm: memcontrol: remove unnecessary memcg argument from soft limit functions
  mm: memcontrol: clean up memcg zoneinfo lookup
  mm/memblock.c: call kmemleak directly from memblock_(alloc|free)
  mm/mempool.c: update the kmemleak stack trace for mempool allocations
  lib/radix-tree.c: update the kmemleak stack trace for radix tree allocations
  mm: introduce kmemleak_update_trace()
  mm/kmemleak.c: use %u to print ->checksum
  ...
Diffstat (limited to 'tools/perf/ui/hist.c')
-rw-r--r--   tools/perf/ui/hist.c   252
1 file changed, 205 insertions(+), 47 deletions(-)
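For orientation before the diff: the new __HPP_SORT_FN()/__HPP_SORT_RAW_FN() macros added below generate one per-column comparator each, wired in through the new .sort member of struct perf_hpp_fmt. As a rough, hypothetical expansion (not literal preprocessor output), the existing HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8) invocation in this file now additionally emits something equivalent to:

/*
 * Sketch of what __HPP_SORT_FN(overhead, period) expands to; shown only to
 * illustrate the shape of the generated comparator.
 */
static int64_t hpp__sort_overhead(struct hist_entry *a, struct hist_entry *b)
{
	/* Delegates to the group-aware helper introduced in this patch. */
	return __hpp__sort(a, b, he_get_period);
}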
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 0f403b83e9d..4484f5bd1b1 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -16,30 +16,25 @@
})
int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
- hpp_field_fn get_field, hpp_callback_fn callback,
- const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent)
+ hpp_field_fn get_field, const char *fmt,
+ hpp_snprint_fn print_fn, bool fmt_percent)
{
- int ret = 0;
+ int ret;
struct hists *hists = he->hists;
struct perf_evsel *evsel = hists_to_evsel(hists);
char *buf = hpp->buf;
size_t size = hpp->size;
- if (callback) {
- ret = callback(hpp, true);
- advance_hpp(hpp, ret);
- }
-
if (fmt_percent) {
double percent = 0.0;
+ u64 total = hists__total_period(hists);
- if (hists->stats.total_period)
- percent = 100.0 * get_field(he) /
- hists->stats.total_period;
+ if (total)
+ percent = 100.0 * get_field(he) / total;
- ret += hpp__call_print_fn(hpp, print_fn, fmt, percent);
+ ret = hpp__call_print_fn(hpp, print_fn, fmt, percent);
} else
- ret += hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));
+ ret = hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));
if (perf_evsel__is_group_event(evsel)) {
int prev_idx, idx_delta;
@@ -50,7 +45,7 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
u64 period = get_field(pair);
- u64 total = pair->hists->stats.total_period;
+ u64 total = hists__total_period(pair->hists);
if (!total)
continue;
@@ -99,13 +94,6 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
}
}
- if (callback) {
- int __ret = callback(hpp, false);
-
- advance_hpp(hpp, __ret);
- ret += __ret;
- }
-
/*
* Restore original buf and size as it's where caller expects
* the result will be saved.
@@ -116,6 +104,62 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
return ret;
}
+static int field_cmp(u64 field_a, u64 field_b)
+{
+ if (field_a > field_b)
+ return 1;
+ if (field_a < field_b)
+ return -1;
+ return 0;
+}
+
+static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
+ hpp_field_fn get_field)
+{
+ s64 ret;
+ int i, nr_members;
+ struct perf_evsel *evsel;
+ struct hist_entry *pair;
+ u64 *fields_a, *fields_b;
+
+ ret = field_cmp(get_field(a), get_field(b));
+ if (ret || !symbol_conf.event_group)
+ return ret;
+
+ evsel = hists_to_evsel(a->hists);
+ if (!perf_evsel__is_group_event(evsel))
+ return ret;
+
+ nr_members = evsel->nr_members;
+ fields_a = calloc(sizeof(*fields_a), nr_members);
+ fields_b = calloc(sizeof(*fields_b), nr_members);
+
+ if (!fields_a || !fields_b)
+ goto out;
+
+ list_for_each_entry(pair, &a->pairs.head, pairs.node) {
+ evsel = hists_to_evsel(pair->hists);
+ fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
+ }
+
+ list_for_each_entry(pair, &b->pairs.head, pairs.node) {
+ evsel = hists_to_evsel(pair->hists);
+ fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
+ }
+
+ for (i = 1; i < nr_members; i++) {
+ ret = field_cmp(fields_a[i], fields_b[i]);
+ if (ret)
+ break;
+ }
+
+out:
+ free(fields_a);
+ free(fields_b);
+
+ return ret;
+}
+
#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct perf_hpp *hpp, \
@@ -179,7 +223,7 @@ static u64 he_get_##_field(struct hist_entry *he) \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
- return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%", \
+ return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
hpp_color_scnprintf, true); \
}
@@ -188,10 +232,16 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
- return __hpp__fmt(hpp, he, he_get_##_field, NULL, fmt, \
+ return __hpp__fmt(hpp, he, he_get_##_field, fmt, \
hpp_entry_scnprintf, true); \
}
+#define __HPP_SORT_FN(_type, _field) \
+static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
+{ \
+ return __hpp__sort(a, b, he_get_##_field); \
+}
+
#define __HPP_ENTRY_RAW_FN(_type, _field) \
static u64 he_get_raw_##_field(struct hist_entry *he) \
{ \
@@ -202,20 +252,29 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
- return __hpp__fmt(hpp, he, he_get_raw_##_field, NULL, fmt, \
+ return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, \
hpp_entry_scnprintf, false); \
}
+#define __HPP_SORT_RAW_FN(_type, _field) \
+static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
+{ \
+ return __hpp__sort(a, b, he_get_raw_##_field); \
+}
+
+
#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
__HPP_COLOR_PERCENT_FN(_type, _field) \
-__HPP_ENTRY_PERCENT_FN(_type, _field)
+__HPP_ENTRY_PERCENT_FN(_type, _field) \
+__HPP_SORT_FN(_type, _field)
#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
-__HPP_ENTRY_RAW_FN(_type, _field)
+__HPP_ENTRY_RAW_FN(_type, _field) \
+__HPP_SORT_RAW_FN(_type, _field)
HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
@@ -227,19 +286,31 @@ HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)
HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
HPP_RAW_FNS(period, "Period", period, 12, 12)
+static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
+ struct hist_entry *b __maybe_unused)
+{
+ return 0;
+}
+
#define HPP__COLOR_PRINT_FNS(_name) \
{ \
.header = hpp__header_ ## _name, \
.width = hpp__width_ ## _name, \
.color = hpp__color_ ## _name, \
- .entry = hpp__entry_ ## _name \
+ .entry = hpp__entry_ ## _name, \
+ .cmp = hpp__nop_cmp, \
+ .collapse = hpp__nop_cmp, \
+ .sort = hpp__sort_ ## _name, \
}
#define HPP__PRINT_FNS(_name) \
{ \
.header = hpp__header_ ## _name, \
.width = hpp__width_ ## _name, \
- .entry = hpp__entry_ ## _name \
+ .entry = hpp__entry_ ## _name, \
+ .cmp = hpp__nop_cmp, \
+ .collapse = hpp__nop_cmp, \
+ .sort = hpp__sort_ ## _name, \
}
struct perf_hpp_fmt perf_hpp__format[] = {
@@ -253,6 +324,7 @@ struct perf_hpp_fmt perf_hpp__format[] = {
};
LIST_HEAD(perf_hpp__list);
+LIST_HEAD(perf_hpp__sort_list);
#undef HPP__COLOR_PRINT_FNS
@@ -270,6 +342,25 @@ LIST_HEAD(perf_hpp__list);
void perf_hpp__init(void)
{
+ struct list_head *list;
+ int i;
+
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ struct perf_hpp_fmt *fmt = &perf_hpp__format[i];
+
+ INIT_LIST_HEAD(&fmt->list);
+
+ /* sort_list may be linked by setup_sorting() */
+ if (fmt->sort_list.next == NULL)
+ INIT_LIST_HEAD(&fmt->sort_list);
+ }
+
+ /*
+ * If user specified field order, no need to setup default fields.
+ */
+ if (field_order)
+ return;
+
perf_hpp__column_enable(PERF_HPP__OVERHEAD);
if (symbol_conf.show_cpu_utilization) {
@@ -287,6 +378,11 @@ void perf_hpp__init(void)
if (symbol_conf.show_total_period)
perf_hpp__column_enable(PERF_HPP__PERIOD);
+
+ /* prepend overhead field for backward compatibility. */
+ list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
+ if (list_empty(list))
+ list_add(list, &perf_hpp__sort_list);
}
void perf_hpp__column_register(struct perf_hpp_fmt *format)
@@ -294,29 +390,90 @@ void perf_hpp__column_register(struct perf_hpp_fmt *format)
list_add_tail(&format->list, &perf_hpp__list);
}
+void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
+{
+ list_add_tail(&format->sort_list, &perf_hpp__sort_list);
+}
+
void perf_hpp__column_enable(unsigned col)
{
BUG_ON(col >= PERF_HPP__MAX_INDEX);
perf_hpp__column_register(&perf_hpp__format[col]);
}
-int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
- struct hists *hists)
+void perf_hpp__setup_output_field(void)
{
- const char *sep = symbol_conf.field_sep;
- struct sort_entry *se;
- int ret = 0;
+ struct perf_hpp_fmt *fmt;
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- if (se->elide)
+ /* append sort keys to output field */
+ perf_hpp__for_each_sort_list(fmt) {
+ if (!list_empty(&fmt->list))
continue;
- ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
- ret += se->se_snprintf(he, s + ret, size - ret,
- hists__col_len(hists, se->se_width_idx));
+ /*
+ * sort entry fields are dynamically created,
+ * so they can share a same sort key even though
+ * the list is empty.
+ */
+ if (perf_hpp__is_sort_entry(fmt)) {
+ struct perf_hpp_fmt *pos;
+
+ perf_hpp__for_each_format(pos) {
+ if (perf_hpp__same_sort_entry(pos, fmt))
+ goto next;
+ }
+ }
+
+ perf_hpp__column_register(fmt);
+next:
+ continue;
}
+}
- return ret;
+void perf_hpp__append_sort_keys(void)
+{
+ struct perf_hpp_fmt *fmt;
+
+ /* append output fields to sort keys */
+ perf_hpp__for_each_format(fmt) {
+ if (!list_empty(&fmt->sort_list))
+ continue;
+
+ /*
+ * sort entry fields are dynamically created,
+ * so they can share a same sort key even though
+ * the list is empty.
+ */
+ if (perf_hpp__is_sort_entry(fmt)) {
+ struct perf_hpp_fmt *pos;
+
+ perf_hpp__for_each_sort_list(pos) {
+ if (perf_hpp__same_sort_entry(pos, fmt))
+ goto next;
+ }
+ }
+
+ perf_hpp__register_sort_field(fmt);
+next:
+ continue;
+ }
+}
+
+void perf_hpp__reset_output_field(void)
+{
+ struct perf_hpp_fmt *fmt, *tmp;
+
+ /* reset output fields */
+ perf_hpp__for_each_format_safe(fmt, tmp) {
+ list_del_init(&fmt->list);
+ list_del_init(&fmt->sort_list);
+ }
+
+ /* reset sort keys */
+ perf_hpp__for_each_sort_list_safe(fmt, tmp) {
+ list_del_init(&fmt->list);
+ list_del_init(&fmt->sort_list);
+ }
}
/*
@@ -325,22 +482,23 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
unsigned int hists__sort_list_width(struct hists *hists)
{
struct perf_hpp_fmt *fmt;
- struct sort_entry *se;
- int i = 0, ret = 0;
+ int ret = 0;
+ bool first = true;
struct perf_hpp dummy_hpp;
perf_hpp__for_each_format(fmt) {
- if (i)
+ if (perf_hpp__should_skip(fmt))
+ continue;
+
+ if (first)
+ first = false;
+ else
ret += 2;
ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
}
- list_for_each_entry(se, &hist_entry__sort_list, list)
- if (!se->elide)
- ret += 2 + hists__col_len(hists, se->se_width_idx);
-
- if (verbose) /* Addr + origin */
+ if (verbose && sort__has_sym) /* Addr + origin */
ret += 3 + BITS_PER_LONG / 4;
return ret;