| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-09-10 14:36:49 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-09-10 14:36:49 -0700 |
| commit | 53f7b9bccd332bba39be78f9085ac1a26fa387fc (patch) | |
| tree | 841d4de4cdb5ce5ccf4b197c0cda570fc17a49df /kernel/sched.c | |
| parent | 3c038f97e4b14c322b49f13578e0714e1a2ece53 (diff) | |
| parent | 1169783085adb9ac969d21103a6885e8435f7ed3 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched: fix ideal_runtime calculations for reniced tasks
sched: improve prev_sum_exec_runtime setting
sched: simplify __check_preempt_curr_fair()
sched: fix xtensa build warning
sched: debug: fix sum_exec_runtime clearing
sched: debug: fix cfs_rq->wait_runtime accounting
sched: fix niced_granularity() shift
sched: fix MC/HT scheduler optimization, without breaking the FUZZ logic.
Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 15 |
|---|---|---|
1 file changed, 6 insertions, 9 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index b533d6db78a..deeb1f8e0c3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,7 +668,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
 /*
  * Shift right and round:
  */
-#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
 
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
@@ -684,10 +684,10 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 	 * Check whether we'd overflow the 64-bit multiplication:
 	 */
 	if (unlikely(tmp > WMULT_CONST))
-		tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
 			WMULT_SHIFT/2);
 	else
-		tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
+		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
 
 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
 	p->se.wait_runtime = 0;
 
 	if (task_has_rt_policy(p)) {
@@ -2512,7 +2511,7 @@ group_next:
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+	if (*imbalance < busiest_load_per_task) {
 		unsigned long tmp, pwr_now, pwr_move;
 		unsigned int imbn;
 
@@ -2564,10 +2563,8 @@ small_imbalance:
 		pwr_move /= SCHED_LOAD_SCALE;
 
 		/* Move if we gain throughput */
-		if (pwr_move <= pwr_now)
-			goto out_balanced;
-
-		*imbalance = busiest_load_per_task;
+		if (pwr_move > pwr_now)
+			*imbalance = busiest_load_per_task;
 	}
 
 	return busiest;
```
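For context on the first two hunks: RSR to SRR is a pure rename, matching the "fix xtensa build warning" entry in the shortlog (xtensa already defines RSR, so the scheduler's "shift right and round" macro gets a non-colliding name; its behavior is unchanged). The macro adds half the divisor before shifting so that x / 2^y rounds to nearest instead of truncating, and calc_delta_mine() applies it in two half-width stages when the full 64-bit product tmp * lw->inv_weight could overflow. The sketch below illustrates both points; it is standalone demo code, not the kernel build: main(), the printf scaffolding, and the tmp/inv_weight values are invented here, while WMULT_SHIFT = 32 mirrors the kernel/sched.c of this era.

```c
/*
 * Standalone sketch of SRR ("shift right and round") and the two-stage
 * reduction used in calc_delta_mine() above. Demo code only: the
 * tmp/inv_weight values are invented; WMULT_SHIFT = 32 mirrors the
 * kernel/sched.c of this era.
 */
#include <stdio.h>

#define WMULT_SHIFT	32

/* Adding half the divisor (1 << (y - 1)) before the shift makes
 * x / 2^y round to nearest instead of truncating toward zero. */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

int main(void)
{
	/* Rounding vs. plain truncation: 7 / 4 = 1.75 */
	printf("7 >> 2    = %llu\n", 7ULL >> 2);	/* 1 (truncated) */
	printf("SRR(7, 2) = %llu\n", SRR(7ULL, 2));	/* 2 (rounded)   */

	/*
	 * calc_delta_mine() wants (tmp * inv_weight) >> WMULT_SHIFT. When
	 * tmp is large, the 64-bit product would overflow, so the shift is
	 * split in half and applied on both sides of the multiplication:
	 * shift tmp down 16 bits, multiply, then shift down the final 16.
	 */
	unsigned long long tmp = 1ULL << 40;		/* delta_exec * weight */
	unsigned long long inv_weight = 1ULL << 30;	/* ~2^32 / weight */

	unsigned long long res = SRR(SRR(tmp, WMULT_SHIFT/2) * inv_weight,
				     WMULT_SHIFT/2);
	/* Powers of two make this exact: 2^40 * 2^30 >> 32 = 2^38 */
	printf("two-stage = %llu\n", res);
	return 0;
}
```

The last two hunks correspond to the "fix MC/HT scheduler optimization, without breaking the FUZZ logic" commit: the SCHED_LOAD_SCALE_FUZZ term is dropped from the small-imbalance test, and when moving a task would not gain throughput the code no longer bails out through goto out_balanced; it leaves the existing *imbalance in place and still returns busiest.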