author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2013-08-06 17:36:41 +0900
committer	Ingo Molnar <mingo@kernel.org>	2013-09-02 08:26:59 +0200
commit	95a79b805b935f4a7b685aa8a117d916c638323e (patch)
tree	e9c7380555d30bf5eea04219aba179696c82b7e5 /kernel
parent	a4f61cc03e443647211a5ae0ab8f8cda2e9e1043 (diff)
sched: Remove one division operation in find_busiest_queue()
Remove one division operation in find_busiest_queue() by using crosswise
multiplication:

	wl_i / power_i > wl_j / power_j :=
		wl_i * power_j > wl_j * power_i

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
[ Expanded the changelog. ]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1375778203-31343-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
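As a rough standalone illustration of the trick above (not the kernel code itself;
the wl/power values are made up, and 1024 is used here as the SCHED_POWER_SCALE
value assumed for that kernel), the division-based comparison and the crosswise
multiplication can be compared side by side:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical weighted loads and cpu power values */
		unsigned long wl_i = 3072, power_i = 1024;
		unsigned long wl_j = 2048, power_j =  512;

		/* old style: scale the load, then divide by cpu power */
		int with_div  = (wl_i * 1024 / power_i) > (wl_j * 1024 / power_j);

		/* new style: crosswise multiplication, no division */
		int cross_mul = wl_i * power_j > wl_j * power_i;

		printf("with division: %d, cross multiplication: %d\n",
		       with_div, cross_mul);
		return 0;
	}

Both expressions decide "is wl_i / power_i greater than wl_j / power_j", but the
second one avoids the integer division (and its truncation) entirely.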
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f918635efe0..8aa217f62a9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4968,7 +4968,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
-	unsigned long max_load = 0;
+	unsigned long busiest_load = 0, busiest_power = 1;
 	int i;
 
 	for_each_cpu(i, sched_group_cpus(group)) {
@@ -4998,11 +4998,15 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		 * the weighted_cpuload() scaled with the cpu power, so that
 		 * the load can be moved away from the cpu that is potentially
 		 * running at a lower capacity.
+		 *
+		 * Thus we're looking for max(wl_i / power_i), crosswise
+		 * multiplication to rid ourselves of the division works out
+		 * to: wl_i * power_j > wl_j * power_i; where j is our
+		 * previous maximum.
 		 */
-		wl = (wl * SCHED_POWER_SCALE) / power;
-
-		if (wl > max_load) {
-			max_load = wl;
+		if (wl * busiest_power > busiest_load * power) {
+			busiest_load = wl;
+			busiest_power = power;
 			busiest = rq;
 		}
 	}