path: root/kernel/trace/trace_event_profile.c
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

/*
 * alloc_percpu() takes a type, not a size, so create a dummy type
 * whose size matches the desired per-cpu buffer size.
 */
typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;

char		*trace_profile_buf;
EXPORT_SYMBOL_GPL(trace_profile_buf);

char		*trace_profile_buf_nmi;
EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
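
/*
 * For reference, a sketch of how an event's profile handler is expected
 * to pick up these buffers, modeled on the ftrace_raw_event code that
 * include/trace/ftrace.h generates in this era (names and details are
 * illustrative, not verbatim):
 *
 *	local_irq_save(flags);
 *	if (in_nmi())
 *		raw_data = rcu_dereference(trace_profile_buf_nmi);
 *	else
 *		raw_data = rcu_dereference(trace_profile_buf);
 *	if (!raw_data)
 *		goto end;
 *	raw_data = per_cpu_ptr(raw_data, smp_processor_id());
 *	... build the record, at most FTRACE_MAX_PROFILE_SIZE bytes ...
 * end:
 *	local_irq_restore(flags);
 *
 * A separate NMI buffer is needed because an NMI can fire while the
 * same CPU is still writing into the regular buffer.
 */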

/* Count the events in use (per event id, not per instance) */
static int	total_profile_count;

/* Refcounted enable: the first enabler allocates the shared per-cpu buffers */
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	/* profile_count starts at -1: only the first enabler gets past here */
	if (atomic_inc_return(&event->profile_count))
		return 0;

	/* first profiled event system-wide: allocate the shared buffers */
	if (!total_profile_count) {
		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(trace_profile_buf, buf);

		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(trace_profile_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_profile_count) {
		free_percpu(trace_profile_buf_nmi);
		free_percpu(trace_profile_buf);
		trace_profile_buf_nmi = NULL;
		trace_profile_buf = NULL;
	}
fail_buf:
	atomic_dec(&event->profile_count);

	return ret;
}
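
/*
 * A worked view of the refcount convention above, assuming profile_count
 * is initialized to -1 (ATOMIC_INIT(-1)) by the event definition macros,
 * which the enable/disable logic here depends on:
 *
 *	unused event:	profile_count == -1
 *	first enable:	atomic_inc_return() -> 0, do the setup
 *	nested enable:	atomic_inc_return() -> >= 1, return 0 early
 *	nested disable:	atomic_add_negative(-1) false, return early
 *	last disable:	atomic_add_negative(-1) true, tear down
 */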

int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			/* don't leak the module reference taken above */
			if (ret)
				module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
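
/*
 * Expected caller on the perf side; a minimal sketch assuming the
 * tp_perf_event_init()/attr.config wiring of kernel/perf_event.c in
 * this era (illustrative, not verbatim):
 *
 *	static const struct pmu *tp_perf_event_init(struct perf_event *event)
 *	{
 *		if (ftrace_profile_enable(event->attr.config))
 *			return NULL;
 *		event->destroy = tp_perf_event_destroy;
 *		return &perf_ops_generic;
 *	}
 */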

/* Refcounted disable: the last disabler frees the shared per-cpu buffers */
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	/* only the last disabler (count drops back to -1) proceeds */
	if (!atomic_add_negative(-1, &event->profile_count))
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		buf = trace_profile_buf;
		rcu_assign_pointer(trace_profile_buf, NULL);

		nmi_buf = trace_profile_buf_nmi;
		rcu_assign_pointer(trace_profile_buf_nmi, NULL);

		/*
		 * Ensure all events currently using the profiling buffers
		 * have finished before releasing them.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
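
/*
 * The teardown above is the usual RCU retract-then-wait pattern; roughly,
 * with the reader being any profiled event handler and the writer being
 * the last disabler:
 *
 *	reader				writer
 *	------				------
 *	local_irq_save(flags);		rcu_assign_pointer(buf, NULL);
 *	p = rcu_dereference(buf);	synchronize_sched();
 *	if (p)				free_percpu(old);
 *		... write into p ...
 *	local_irq_restore(flags);
 *
 * synchronize_sched() only returns once every CPU has left its
 * irq/preempt-disabled section, so no handler can still be writing to
 * the buffers when they are freed.
 */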

void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
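
/*
 * The matching teardown on the perf side, again as a sketch of this
 * era's kernel/perf_event.c (illustrative, not verbatim):
 *
 *	static void tp_perf_event_destroy(struct perf_event *event)
 *	{
 *		ftrace_profile_disable(event->attr.config);
 *	}
 */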