author     Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:03 +0200
committer  Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:03 +0200
commit     8ebc91d93669af39dbed50914d7daf457eeb43be (patch)
tree       c7ab6dfce3ea737db57599c8e42a41ddac51464c /kernel
parent     2bd8e6d422a4f44c0994f909317eba80b0fe08a1 (diff)
sched: remove stat_gran
remove the stat_gran code - it was disabled by default and it causes unnecessary overhead.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
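For context, the core of the change is in update_curr(): before this patch, the elapsed execution time was accumulated in se->delta_exec and only folded into the statistics once it crossed sysctl_sched_stat_granularity; after it, every delta is charged immediately and the batching state disappears. The following is a minimal standalone sketch of the two code paths; struct entity, apply_delta() and the two update_curr_*() helpers are simplified stand-ins for illustration, not the kernel's struct sched_entity / __update_curr().

/* Sketch of the batched (old) vs. immediate (new) accounting paths.
 * All types and helpers here are simplified stand-ins, not kernel code. */
#include <stdio.h>

struct entity {
	unsigned long long exec_start;       /* time we last charged this entity */
	unsigned long long sum_exec_runtime; /* accumulated statistics */
	unsigned long      delta_exec;       /* only used by the old, batched path */
};

/* stand-in for the removed sched_stat_granularity_ns threshold */
static unsigned long stat_granularity = 1000000UL;

/* charge a delta to the entity's statistics (stands in for __update_curr()) */
static void apply_delta(struct entity *se, unsigned long delta)
{
	se->sum_exec_runtime += delta;
}

/* Old path: batch deltas and flush only once past the granularity threshold. */
static void update_curr_batched(struct entity *se, unsigned long long now)
{
	unsigned long delta = (unsigned long)(now - se->exec_start);

	se->delta_exec += delta;
	if (se->delta_exec > stat_granularity) {
		apply_delta(se, se->delta_exec);
		se->delta_exec = 0;
	}
	se->exec_start = now;
}

/* New path (this patch): apply every delta immediately, no batching state. */
static void update_curr_immediate(struct entity *se, unsigned long long now)
{
	unsigned long delta = (unsigned long)(now - se->exec_start);

	apply_delta(se, delta);
	se->exec_start = now;
}

int main(void)
{
	struct entity a = { .exec_start = 0 }, b = { .exec_start = 0 };

	update_curr_batched(&a, 500000);   /* below granularity: stats lag behind */
	update_curr_immediate(&b, 500000); /* stats are always current */
	printf("batched: %llu, immediate: %llu\n",
	       a.sum_exec_runtime, b.sum_exec_runtime);
	return 0;
}

With the threshold disabled by default anyway, the batched path only added per-entity state (delta_exec, delta_fair_run, delta_fair_sleep) and branches on every update, which is the overhead the commit message refers to.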
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	5
-rw-r--r--	kernel/sched_fair.c	46
-rw-r--r--	kernel/sysctl.c	11
3 files changed, 15 insertions(+), 47 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ae1544f0a20..d4dabfcc776 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -829,7 +829,7 @@ static void update_curr_load(struct rq *rq)
* Stagger updates to ls->delta_fair. Very frequent updates
* can be expensive.
*/
- if (ls->delta_stat >= sysctl_sched_stat_granularity)
+ if (ls->delta_stat)
__update_curr_load(rq, ls);
}
@@ -1588,9 +1588,6 @@ static void __sched_fork(struct task_struct *p)
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
- p->se.delta_exec = 0;
- p->se.delta_fair_run = 0;
- p->se.delta_fair_sleep = 0;
p->se.wait_runtime = 0;
p->se.sleep_start_fair = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2e84aaffe42..2138c40f483 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -85,8 +85,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
*/
const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
-const_debug unsigned int sysctl_sched_stat_granularity;
-
unsigned int sysctl_sched_runtime_limit __read_mostly;
/*
@@ -360,13 +358,13 @@ add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
* are not in our scheduling class.
*/
static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+ unsigned long delta_exec)
{
- unsigned long delta, delta_exec, delta_fair, delta_mine;
+ unsigned long delta, delta_fair, delta_mine;
struct load_weight *lw = &cfs_rq->load;
unsigned long load = lw->weight;
- delta_exec = curr->delta_exec;
schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
curr->sum_exec_runtime += delta_exec;
@@ -400,6 +398,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+ u64 now = rq_of(cfs_rq)->clock;
unsigned long delta_exec;
if (unlikely(!curr))
@@ -410,15 +409,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
* since the last time we changed load (this cannot
* overflow on 32 bits):
*/
- delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
-
- curr->delta_exec += delta_exec;
+ delta_exec = (unsigned long)(now - curr->exec_start);
- if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
- __update_curr(cfs_rq, curr);
- curr->delta_exec = 0;
- }
- curr->exec_start = rq_of(cfs_rq)->clock;
+ __update_curr(cfs_rq, curr, delta_exec);
+ curr->exec_start = now;
}
static inline void
@@ -494,10 +488,9 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
* Note: must be called with a freshly updated rq->fair_clock.
*/
static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long delta_fair)
{
- unsigned long delta_fair = se->delta_fair_run;
-
schedstat_set(se->wait_max, max(se->wait_max,
rq_of(cfs_rq)->clock - se->wait_start));
@@ -519,12 +512,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
(u64)(cfs_rq->fair_clock - se->wait_start_fair));
- se->delta_fair_run += delta_fair;
- if (unlikely(abs(se->delta_fair_run) >=
- sysctl_sched_stat_granularity)) {
- __update_stats_wait_end(cfs_rq, se);
- se->delta_fair_run = 0;
- }
+ __update_stats_wait_end(cfs_rq, se, delta_fair);
se->wait_start_fair = 0;
schedstat_set(se->wait_start, 0);
@@ -567,9 +555,10 @@ update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
* Scheduling class queueing methods:
*/
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long delta_fair)
{
- unsigned long load = cfs_rq->load.weight, delta_fair;
+ unsigned long load = cfs_rq->load.weight;
long prev_runtime;
/*
@@ -582,8 +571,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
load = rq_of(cfs_rq)->cpu_load[2];
- delta_fair = se->delta_fair_sleep;
-
/*
* Fix up delta_fair with the effect of us running
* during the whole sleep period:
@@ -618,12 +605,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
- se->delta_fair_sleep += delta_fair;
- if (unlikely(abs(se->delta_fair_sleep) >=
- sysctl_sched_stat_granularity)) {
- __enqueue_sleeper(cfs_rq, se);
- se->delta_fair_sleep = 0;
- }
+ __enqueue_sleeper(cfs_rq, se, delta_fair);
se->sleep_start_fair = 0;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6c97259e863..9b1b0d4ff96 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -266,17 +266,6 @@ static ctl_table kern_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_stat_granularity_ns",
- .data = &sysctl_sched_stat_granularity,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &min_wakeup_granularity_ns,
- .extra2 = &max_wakeup_granularity_ns,
- },
- {
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_runtime_limit_ns",
.data = &sysctl_sched_runtime_limit,
.maxlen = sizeof(unsigned int),