author    Ken Chen <kenchen@google.com>          2008-12-16 23:41:22 -0800
committer Ingo Molnar <mingo@elte.hu>            2008-12-18 13:54:01 +0100
commit    9c2c48020ec0dd6ecd27e5a1298f73b40d85a595 (patch)
tree      1a44f56e18012120aff0969094eebd572deb2ea3
parent    e9515c3c9feecd74174c2998add0db51e02abb8d (diff)
schedstat: consolidate per-task cpu runtime stats
Impact: simplify code

When we turn on CONFIG_SCHEDSTATS, per-task cpu runtime is accumulated
twice: once in task->se.sum_exec_runtime and once in sched_info.cpu_time.
These two stats are exactly the same.

Given that task->se.sum_exec_runtime is always accumulated by the core
scheduler, sched_info can reuse that data instead of duplicating the
accounting.

Signed-off-by: Ken Chen <kenchen@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   fs/proc/base.c          2
-rw-r--r--   include/linux/sched.h   3
-rw-r--r--   kernel/delayacct.c      2
-rw-r--r--   kernel/sched.c          2
-rw-r--r--   kernel/sched_stats.h    5

5 files changed, 7 insertions, 7 deletions
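
Before the per-file diffs, a quick illustration of what this means for
userspace: the first field of /proc/<pid>/schedstat keeps its meaning
(time spent on cpu, in nanoseconds) but is now served from
task->se.sum_exec_runtime. A minimal reader, matching the three-field
"%llu %llu %lu" format that proc_pid_schedstat() prints below:

#include <stdio.h>

int main(void)
{
	unsigned long long cpu_time, run_delay;
	unsigned long pcount;
	FILE *f = fopen("/proc/self/schedstat", "r");

	if (!f)
		return 1;
	/* same format string as fs/proc/base.c uses to produce the file */
	if (fscanf(f, "%llu %llu %lu", &cpu_time, &run_delay, &pcount) == 3)
		printf("ran %llu ns, waited %llu ns, %lu timeslices\n",
		       cpu_time, run_delay, pcount);
	fclose(f);
	return 0;
}

Because the format string is untouched, existing parsers keep working;
only the kernel-internal source of the first field changes.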
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d4677603c88..4d745bac768 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -347,7 +347,7 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
static int proc_pid_schedstat(struct task_struct *task, char *buffer)
{
return sprintf(buffer, "%llu %llu %lu\n",
- task->sched_info.cpu_time,
+ task->se.sum_exec_runtime,
task->sched_info.run_delay,
task->sched_info.pcount);
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8cccd6dc5d6..2d1e840ddd3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -670,8 +670,7 @@ struct reclaim_state;
struct sched_info {
/* cumulative counters */
unsigned long pcount; /* # of times run on this cpu */
- unsigned long long cpu_time, /* time spent on the cpu */
- run_delay; /* time spent waiting on a runqueue */
+ unsigned long long run_delay; /* time spent waiting on a runqueue */
/* timestamps */
unsigned long long last_arrival,/* when we last ran on a cpu */
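
With cpu_time gone, struct sched_info is down to one cumulative counter
plus the timestamps. The hunk above is truncated; as a sketch, the
structure reads roughly as below with this patch applied (last_queued
completes the timestamp pair in the sched.h of this era; any remaining
CONFIG_SCHEDSTATS-only members are omitted here):

struct sched_info {
	/* cumulative counters */
	unsigned long pcount;		/* # of times run on this cpu */
	unsigned long long run_delay;	/* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};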
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index b3179dad71b..abb6e17505e 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
*/
t1 = tsk->sched_info.pcount;
t2 = tsk->sched_info.run_delay;
- t3 = tsk->sched_info.cpu_time;
+ t3 = tsk->se.sum_exec_runtime;
d->cpu_count += t1;
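
The hunk stops at the pcount accumulation; the function goes on to fold
t2 and t3 into the taskstats reply. Roughly, per the delayacct.c of this
era (a reconstruction, not the verbatim source; tmp is an s64 declared
earlier in the function):

	/* taskstats counters are u64; clamp to 0 on overflow */
	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

Since only the source of t3 changes, the exported cpu_run_virtual_total
keeps the same meaning.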
diff --git a/kernel/sched.c b/kernel/sched.c
index f53e2b8ef52..fd835fc320b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -596,6 +596,8 @@ struct rq {
#ifdef CONFIG_SCHEDSTATS
/* latency stats */
struct sched_info rq_sched_info;
+ unsigned long long rq_cpu_time;
+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
/* sys_sched_yield() stats */
unsigned int yld_exp_empty;
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 7dbf72a2b02..3b01098164c 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -31,7 +31,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
rq->sched_switch, rq->sched_count, rq->sched_goidle,
rq->ttwu_count, rq->ttwu_local,
- rq->rq_sched_info.cpu_time,
+ rq->rq_cpu_time,
rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
seq_printf(seq, "\n");
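
The per-cpu line in /proc/schedstat keeps its shape; only the source of
the running-time field changes. A sketch of a reader for the trailing
triple (time running in ns, time waiting in ns, timeslice count),
assuming the field layout of this era's schedstat version (the number of
leading fields differs across versions):

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		int cpu;
		unsigned long long run_ns, wait_ns;
		unsigned long slices;

		/* skip the yield/switch/wakeup counters; keep the tail */
		if (sscanf(line,
			   "cpu%d %*u %*u %*u %*u %*u %*u %*u %*u %*u %llu %llu %lu",
			   &cpu, &run_ns, &wait_ns, &slices) == 4)
			printf("cpu%d: ran %llu ns, waited %llu ns, %lu slices\n",
			       cpu, run_ns, wait_ns, slices);
	}
	fclose(f);
	return 0;
}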
@@ -123,7 +123,7 @@ static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
if (rq)
- rq->rq_sched_info.cpu_time += delta;
+ rq->rq_cpu_time += delta;
}
static inline void
@@ -236,7 +236,6 @@ static inline void sched_info_depart(struct task_struct *t)
unsigned long long delta = task_rq(t)->clock -
t->sched_info.last_arrival;
- t->sched_info.cpu_time += delta;
rq_sched_info_depart(task_rq(t), delta);
if (t->state == TASK_RUNNING)
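
For context, sched_info_depart() as it reads once the duplicated
accumulation is removed. The hunk is truncated; this sketch, based on the
sched_stats.h of this era, shows the tail that re-arms the wait timestamp
for tasks preempted while still runnable:

static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	/*
	 * If we are still runnable (preempted rather than blocking),
	 * start counting runqueue wait time again.
	 */
	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}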