author	Peter Williams <pwil3058@bigpond.net.au>	2007-10-24 18:23:51 +0200
committer	Ingo Molnar <mingo@elte.hu>	2007-10-24 18:23:51 +0200
commit	e1d1484f72127a5580d37c379f6a5b2c2786434c (patch)
tree	e3e6529c5b9079f35b2c60bbd346a3c51c2b2c13 /include
parent	a0f846aa76c3e03d54c1700a87cab3a46ccd71e2 (diff)
sched: reduce balance-tasks overhead
At the moment, balance_tasks() provides low level functionality for both move_tasks() and move_one_task() (indirectly) via the load_balance() function (in the sched_class interface), which also provides dual functionality. This dual functionality complicates the interfaces and internal mechanisms and increases the run time overhead of operations that are called with two run queue locks held. This patch addresses the issue by giving move_one_task() its own sched_class method, reducing the overhead of these operations.

Signed-off-by: Peter Williams <pwil3058@bigpond.net.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/sched.h	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 52288a64769..639241f4f3d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -829,11 +829,14 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest,
-			unsigned long max_nr_move, unsigned long max_load_move,
+			struct rq *busiest, unsigned long max_load_move,
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *all_pinned, int *this_best_prio);
 
+	int (*move_one_task) (struct rq *this_rq, int this_cpu,
+			      struct rq *busiest, struct sched_domain *sd,
+			      enum cpu_idle_type idle);
+
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
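
For context, a minimal sketch (not part of this diff) of how the core scheduler could dispatch the new hook once the single-task case is split out of load_balance(). The walk over scheduling classes via sched_class_highest and ->next is an assumption based on the 2.6.23-era class list in kernel/sched.c:

	/*
	 * Illustrative sketch only -- not taken from this patch.
	 * Assumes the 2.6.23-era scheduling-class list, where classes
	 * are chained through ->next starting at sched_class_highest.
	 *
	 * With the split interface, moving a single task no longer goes
	 * through the heavier load_balance() path: each class is asked,
	 * in priority order, to move one task, and the walk stops as
	 * soon as one succeeds.
	 */
	static int move_one_task(struct rq *this_rq, int this_cpu,
				 struct rq *busiest, struct sched_domain *sd,
				 enum cpu_idle_type idle)
	{
		const struct sched_class *class;

		for (class = sched_class_highest; class; class = class->next)
			if (class->move_one_task(this_rq, this_cpu, busiest,
						 sd, idle))
				return 1;	/* moved one task; done */

		return 0;			/* nothing movable */
	}

Separating the hook this way lets the single-task case skip the accounting that load_balance() needs (max_nr_move and max_load_move tracking), which is the run time overhead the commit message refers to, since both paths run with two run queue locks held.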