author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-04-09 10:53:45 +0200
committer Ingo Molnar <mingo@elte.hu>                2009-04-09 11:50:52 +0200
commit    1ccd15497869f3ed83b5225d410df53a96e52757 (patch)
tree      67c475136503f53c2b2d9d9ada1c281aef0a162b
parent    9ee318a7825929bc3734110b83ae8e20e53d9de3 (diff)
perf_counter: sysctl for system wide perf counters
Impact: add sysctl for paranoid/relaxed perfcounters policy

Allow the use of system wide perf counters to everybody, but provide a
sysctl to disable it for the paranoid security minded.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090409085524.514046352@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/linux/perf_counter.h   2
-rw-r--r--  kernel/perf_counter.c          4
-rw-r--r--  kernel/sysctl.c               11
3 files changed, 16 insertions(+), 1 deletion(-)
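With the patch applied, the knob appears as /proc/sys/kernel/perf_counter_privileged
(the path follows from the "perf_counter_privileged" procname registered in
kernel/sysctl.c below). A minimal userspace sketch, not part of this patch, for
reading the current policy:

/*
 * Illustration only: reads the new knob through procfs. The path is
 * derived from the procname added in kernel/sysctl.c in this patch.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_counter_privileged", "r");
	int priv = 0;

	if (!f) {
		perror("fopen");	/* e.g. kernel built without CONFIG_PERF_COUNTERS */
		return 1;
	}
	if (fscanf(f, "%d", &priv) != 1)
		priv = -1;
	fclose(f);

	printf("system-wide perf counters %s CAP_SYS_ADMIN\n",
	       priv ? "require" : "do not require");
	return 0;
}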
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index c22363a4f74..98143288530 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -568,6 +568,8 @@ struct perf_callchain_entry {
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern int sysctl_perf_counter_priv;
+
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 76376ecb23b..7efb7ebaaae 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,8 @@ static atomic_t nr_mmap_tracking __read_mostly;
static atomic_t nr_munmap_tracking __read_mostly;
static atomic_t nr_comm_tracking __read_mostly;
+int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
+
/*
* Mutex for (sysadmin-configurable) counter reservations:
*/
@@ -1132,7 +1134,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
*/
if (cpu != -1) {
/* Must be root to operate on a CPU counter: */
- if (!capable(CAP_SYS_ADMIN))
+ if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
if (cpu < 0 || cpu > num_possible_cpus())
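The hunk above inverts the old default: per-CPU (system wide) counters now demand
CAP_SYS_ADMIN only while the sysctl is non-zero. As a hedged restatement, the
effective policy is equivalent to the helper below (the helper itself is
hypothetical and not part of the patch):

/*
 * Illustration only: mirrors the patched condition in find_get_context().
 * Opening a per-CPU counter is allowed when the paranoid knob is off,
 * or when the caller has CAP_SYS_ADMIN.
 */
static int cpu_counter_allowed(int sysctl_perf_counter_priv, int has_cap_sys_admin)
{
	return !sysctl_perf_counter_priv || has_cap_sys_admin;
}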
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4286b62b34a..8ba457838d9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -49,6 +49,7 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/slow-work.h>
+#include <linux/perf_counter.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -920,6 +921,16 @@ static struct ctl_table kern_table[] = {
.child = slow_work_sysctls,
},
#endif
+#ifdef CONFIG_PERF_COUNTERS
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "perf_counter_privileged",
+ .data = &sysctl_perf_counter_priv,
+ .maxlen = sizeof(sysctl_perf_counter_priv),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
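Because the table entry above is registered with mode 0644, only root can flip the
knob at runtime while reads stay open to everyone. A short sketch, not part of the
patch, that restores the paranoid policy by writing through procfs:

/*
 * Illustration only: re-enable the privilege requirement. Requires root,
 * since the sysctl entry above uses mode 0644.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_counter_privileged", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "1\n");	/* 1: per-CPU counters need CAP_SYS_ADMIN again */
	fclose(f);
	return 0;
}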