commit:    62160e3f4a06d948ec89665d29f1173e551deedc (patch)
author:    Ingo Molnar <mingo@elte.hu>   2007-10-15 17:00:03 +0200
committer: Ingo Molnar <mingo@elte.hu>   2007-10-15 17:00:03 +0200
tree:      b86bfddf913ea3b22837fb3dcbbdf8c2fb567ed1
parent:    53df556e06d85245cf6aacedaba8e4da684859c3 (diff)
sched: track cfs_rq->curr on !group-scheduling too
Noticed by Roman Zippel: use cfs_rq->curr in the !group-scheduling
case too. Small micro-optimization and cleanup effect:
   text    data     bss     dec     hex filename
  36269    3482      24   39775    9b5f sched.o.before
  36177    3486      24   39687    9b07 sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
 kernel/sched.c      |  2
 kernel/sched_fair.c | 31
 2 files changed, 10 insertions(+), 23 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f6a81061fd5..3209e2cc2c2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -189,11 +189,11 @@ struct cfs_rq {
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
 	struct rb_node *rb_load_balance_curr;
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	/* 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
 	struct sched_entity *curr;
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

 	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 105d57b41aa..335faf06a56 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -111,51 +111,38 @@ extern struct sched_class fair_sched_class;
  * CFS operations on generic schedulable entities:
  */

-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/* cpu runqueue to which this cfs_rq is attached */
-static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
-{
-	return cfs_rq->rq;
-}
-
 /* currently running entity (if any) on this cfs_rq */
 static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
 {
 	return cfs_rq->curr;
 }

-/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se)	(!se->my_q)
-
 static inline void
 set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	cfs_rq->curr = se;
 }

-#else	/* CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_FAIR_GROUP_SCHED

+/* cpu runqueue to which this cfs_rq is attached */
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 {
-	return container_of(cfs_rq, struct rq, cfs);
+	return cfs_rq->rq;
 }

-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
-{
-	struct rq *rq = rq_of(cfs_rq);
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se)	(!se->my_q)

-	if (unlikely(rq->curr->sched_class != &fair_sched_class))
-		return NULL;
+#else	/* CONFIG_FAIR_GROUP_SCHED */

-	return &rq->curr->se;
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+	return container_of(cfs_rq, struct rq, cfs);
 }

 #define entity_is_task(se)	1

-static inline void
-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
-
 #endif	/* CONFIG_FAIR_GROUP_SCHED */

 static inline struct task_struct *task_of(struct sched_entity *se)