author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-09-01 10:34:36 +0200
committer Ingo Molnar <mingo@elte.hu>                2009-09-04 10:09:54 +0200
commit    ab29230e673c646292c90c8b9d378b9562145af0
tree      0b87a89add83d14f0cf6aae3b6322865985103a0 /kernel/sched.c
parent    a52bfd73589eaf88d9c95ad2c1de0b38a6b27972
sched: Implement dynamic cpu_power
Recompute the cpu_power for each cpu during load-balance.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083826.162033479@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
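Worked example (constants from this kernel era; the parent commit listed above introduced sd->smt_gain with a default of 1178 for SMT domains, and SCHED_LOAD_SCALE = 1 << SCHED_LOAD_SHIFT = 1024): on a 2-thread SMT core the default arch_smt_gain() returns 1178 / 2 = 589, and update_cpu_power() then computes (1024 * 589) >> 10 = 589 per sibling. The two siblings together advertise 1178, i.e. roughly 1.15x the capacity of a single full core.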
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    38
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 55112261027..036600fd70b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3699,14 +3699,46 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
-static void update_sched_power(struct sched_domain *sd)
+unsigned long __weak arch_smt_gain(struct sched_domain *sd, int cpu)
+{
+ unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long smt_gain = sd->smt_gain;
+
+ smt_gain /= weight;
+
+ return smt_gain;
+}
+
+static void update_cpu_power(struct sched_domain *sd, int cpu)
+{
+ unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long power = SCHED_LOAD_SCALE;
+ struct sched_group *sdg = sd->groups;
+ unsigned long old = sdg->__cpu_power;
+
+ /* here we could scale based on cpufreq */
+
+ if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+ power *= arch_smt_gain(sd, cpu);
+ power >>= SCHED_LOAD_SHIFT;
+ }
+
+ /* here we could scale based on RT time */
+
+ if (power != old) {
+ sdg->__cpu_power = power;
+ sdg->reciprocal_cpu_power = reciprocal_value(power);
+ }
+}
+
+static void update_group_power(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
unsigned long power = sdg->__cpu_power;
if (!child) {
- /* compute cpu power for this cpu */
+ update_cpu_power(sd, cpu);
return;
}
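Note the __weak annotation on arch_smt_gain() in the hunk above: the even split of smt_gain across the siblings is only a generic default, and an architecture can link in a strong definition to replace it. A hypothetical sketch, not part of this patch (arch_sibling_gain() is an invented stand-in for whatever per-cpu measurement the architecture really has):

/*
 * Hypothetical override: a non-weak definition takes precedence over
 * the __weak generic one at link time. arch_sibling_gain() is
 * invented here for illustration only.
 */
unsigned long arch_smt_gain(struct sched_domain *sd, int cpu)
{
	return arch_sibling_gain(cpu);
}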
@@ -3749,7 +3781,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
if (local_group) {
balance_cpu = group_first_cpu(group);
if (balance_cpu == this_cpu)
- update_sched_power(sd);
+ update_group_power(sd, this_cpu);
}
/* Tally up the load of all CPUs in the group */
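Taken together, the flow this patch introduces is: during load balancing, only the first cpu of the local group (balance_cpu) refreshes the power figures; at the bottom domain level update_cpu_power() recomputes the per-cpu value, and higher levels aggregate what their children computed. Below is a minimal, self-contained userspace sketch of that flow under stated simplifications: the structs are invented stand-ins for sched_domain/sched_group, the child aggregation is reduced to a multiply over identical children, and the explicit recursion stands in for the fact that in the kernel the lower levels are refreshed when they themselves balance.

#include <stdio.h>

#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1UL << SCHED_LOAD_SHIFT)

/* Invented stand-ins for struct sched_group / sched_domain: only the
 * fields this sketch needs. */
struct group {
	unsigned long power;		/* models sdg->__cpu_power */
};

struct domain {
	struct domain *child;
	struct group *groups;		/* local group at this level */
	int nr_child_groups;		/* children aggregated here */
	unsigned long smt_gain;
	int shares_cpupower;		/* models SD_SHARE_CPUPOWER */
	int weight;			/* cpus spanned at this level */
};

/* Bottom level: recompute one cpu's power. The patch applies only the
 * SMT discount; cpufreq and RT-time scaling are left as future hooks
 * (see the two placeholder comments in the first hunk). */
static void update_cpu_power(struct domain *sd)
{
	unsigned long power = SCHED_LOAD_SCALE;

	if (sd->shares_cpupower && sd->weight > 1) {
		power *= sd->smt_gain / sd->weight; /* arch_smt_gain() default */
		power >>= SCHED_LOAD_SHIFT;
	}
	sd->groups->power = power;
}

/* Higher levels: refresh the child first, then aggregate. The real
 * update_group_power() sums its child groups; with identical children
 * that reduces to a multiply. */
static void update_group_power(struct domain *sd)
{
	if (!sd->child) {
		update_cpu_power(sd);
		return;
	}
	update_group_power(sd->child);
	sd->groups->power = sd->nr_child_groups * sd->child->groups->power;
}

int main(void)
{
	struct group smt_grp = { 0 }, core_grp = { 0 };
	struct domain smt  = { NULL, &smt_grp,  0, 1178, 1, 2 };
	struct domain core = { &smt, &core_grp, 2,    0, 0, 2 };

	update_group_power(&core);
	printf("SMT sibling power: %lu\n", smt_grp.power);  /* 589 */
	printf("core-level power:  %lu\n", core_grp.power); /* 1178 */
	return 0;
}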