path: root/kernel/sched_debug.c
author	Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:03 +0200
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:03 +0200
commit	a25707f3aef9cf68c341eba5960d580f364e4e6f (patch)
tree	77f13a0d32f68217cf6be32b1ab755bf7c1c0665	/kernel/sched_debug.c
parent	8ebc91d93669af39dbed50914d7daf457eeb43be (diff)
sched: remove precise CPU load
CPU load calculations are statistical anyway, and there is little benefit in recomputing them on every scheduling event. So remove this code; this also removes a divide from the scheduler wakeup and context-switch fast path.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r--	kernel/sched_debug.c	2
1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index fd080f686f1..6b789dae7fd 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,8 +145,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(nr_running);
 	SEQ_printf(m, " .%-30s: %lu\n", "load",
 		   rq->ls.load.weight);
-	P(ls.delta_fair);
-	P(ls.delta_exec);
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
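
For context, the P() lines in the hunk above rely on a small helper macro defined earlier in kernel/sched_debug.c. The sketch below is an approximation of that helper, not quoted verbatim from this tree: it stringifies a runqueue field name and prints its value as one row of the per-CPU debug output.

/*
 * Approximate sketch of the P() helper assumed by print_cpu() above
 * (not verbatim from this tree): print one runqueue field per line,
 * using the field name itself as the label.
 */
#define P(x) \
	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x))

With the per-event load-tracking members gone from struct rq, P(ls.delta_fair) and P(ls.delta_exec) would no longer have anything to report, so the patch simply drops those two debug rows.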