author     Gleb Natapov <gleb@redhat.com>    2011-11-27 17:59:09 +0200
committer  Ingo Molnar <mingo@elte.hu>       2011-12-06 08:34:02 +0100
commit     b202952075f62603bea9bfb6ebc6b0420db11949
tree       9c8e0538b455e68b5c371caba5b1585ed0ef9d8a /include/linux/jump_label.h
parent     b79387ef185af2323594920923cecba5753c3817
perf, core: Rate limit perf_sched_events jump_label patching
jump_label patching is a very expensive operation that involves pausing
all cpus. The patching of the perf_sched_events jump_label is easily
controllable from userspace by an unprivileged user.

When the user runs a loop like this:

  "while true; do perf stat -e cycles true; done"

... the performance of my test application that just increments a
counter for one second drops by 4%. This is on a 16 cpu box with my
test application using only one of them. An impact on a real server
doing real work will be worse. Performance of the KVM PMU drops nearly
50% due to jump_label patching for "perf record", since the KVM PMU
implementation creates and destroys perf events frequently.

This patch introduces a way to rate limit jump_label patching and uses
it to fix the above problem.

I believe that as jump_label use spreads the problem will become more
common, and thus solving it in generic code is appropriate. Fixing it
in the perf code instead would mean moving the jump_label accounting
logic into perf, with all the ifdefs needed for the JUMP_LABEL=n case.
With this patch all details are nicely hidden inside the jump_label
code.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Acked-by: Jason Baron <jbaron@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20111127155909.GO2557@redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
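For reference, a consumer of the new interface embeds its key in a
struct jump_label_key_deferred, sets the rate limit once at init time,
and replaces jump_label_dec() with jump_label_dec_deferred() on the
teardown path. The sketch below is illustrative only and not part of
this patch; the my_events key and the one-second (HZ) timeout are
assumptions modeled on the intended perf_sched_events usage:

  /* Illustrative consumer of the new API (not part of this patch);
   * the key name and the HZ timeout are assumptions. */
  #include <linux/jump_label.h>

  static struct jump_label_key_deferred my_events;

  static int __init my_subsys_init(void)
  {
          /* Defer the disabling side of patching by up to HZ jiffies
           * (one second), so rapid create/destroy cycles collapse into
           * at most one patch operation per second. */
          jump_label_rate_limit(&my_events, HZ);
          return 0;
  }

  static void my_event_create(void)
  {
          jump_label_inc(&my_events.key);      /* enable: patches immediately */
  }

  static void my_event_destroy(void)
  {
          jump_label_dec_deferred(&my_events); /* disable: rate limited */
  }

Note that enabling still patches immediately, so turning an event on
pays no extra latency; only the expensive 1->0 disable is batched.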
Diffstat (limited to 'include/linux/jump_label.h')
-rw-r--r--  include/linux/jump_label.h | 24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d425b5..a1e7f909c80 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/workqueue.h>
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -14,6 +15,12 @@ struct jump_label_key {
#endif
};
+struct jump_label_key_deferred {
+ struct jump_label_key key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void jump_label_inc(struct jump_label_key *key);
extern void jump_label_dec(struct jump_label_key *key);
+extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
extern bool jump_label_enabled(struct jump_label_key *key);
extern void jump_label_apply_nops(struct module *mod);
+extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl);
#else /* !HAVE_JUMP_LABEL */
@@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void)
{
}
+struct jump_label_key_deferred {
+ struct jump_label_key key;
+};
+
static __always_inline bool static_branch(struct jump_label_key *key)
{
if (unlikely(atomic_read(&key->enabled)))
@@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key)
atomic_dec(&key->enabled);
}
+static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+ jump_label_dec(&key->key);
+}
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -102,6 +121,11 @@ static inline int jump_label_apply_nops(struct module *mod)
{
return 0;
}
+
+static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl)
+{
+}
#endif /* HAVE_JUMP_LABEL */
#endif /* _LINUX_JUMP_LABEL_H */
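The header only adds the interface; the mechanism lives in
kernel/jump_label.c, which this diff does not show. Below is a minimal
sketch of the deferral logic, reconstructed as an assumption from the
declarations above; jump_label_mutex, jump_label_unlock() and
jump_label_update() are taken to be pre-existing internals of that
file:

  /* Assumed sketch of the kernel/jump_label.c side, not shown in this
   * diff. On the last decrement with a rate limit set, keep the branch
   * enabled and push the expensive patching into delayed work, so
   * inc/dec churn rewrites the kernel text at most once per period. */
  static void __jump_label_dec(struct jump_label_key *key,
  		unsigned long rate_limit, struct delayed_work *work)
  {
  	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
  		return;

  	if (rate_limit) {
  		/* Last user went away: re-take the reference and retry
  		 * after the timeout instead of patching right now. */
  		atomic_inc(&key->enabled);
  		schedule_delayed_work(work, rate_limit);
  	} else
  		jump_label_update(key, JUMP_LABEL_DISABLE);
  	jump_label_unlock();
  }

  static void jump_label_update_timeout(struct work_struct *work)
  {
  	struct jump_label_key_deferred *key =
  		container_of(work, struct jump_label_key_deferred, work.work);

  	/* Timeout expired: perform the real, unlimited decrement. */
  	__jump_label_dec(&key->key, 0, NULL);
  }

  void jump_label_dec_deferred(struct jump_label_key_deferred *key)
  {
  	__jump_label_dec(&key->key, key->timeout, &key->work);
  }

  void jump_label_rate_limit(struct jump_label_key_deferred *key,
  		unsigned long rl)
  {
  	key->timeout = rl;	/* minimum patching interval, in jiffies */
  	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
  }

If the key is re-enabled while the work is pending, the deferred
decrement simply drops the extra reference without touching the text,
so a tight enable/disable loop never triggers repeated cpu pauses.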