author     venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>   2008-08-04 11:59:09 -0700
committer  Dave Jones <davej@redhat.com>                                   2008-10-09 13:52:44 -0400
commit     3430502d356284ff4f7782d75bb01a402fd3d45e (patch)
tree       b656eeb8b58e13e3b9d5a57b60cbf637829b3886 /drivers/cpufreq
parent     c43aa3bd99a67009a167430e80c5fde6f37288d8 (diff)
[CPUFREQ][3/6] cpufreq: get_cpu_idle_time() changes in ondemand for idle-microaccounting
Preparatory changes for doing idle micro-accounting in the ondemand governor.
get_cpu_idle_time() gains an extra parameter and now returns the idle time
along with the wall time that corresponds to the idle-time measurement.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
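
To make the new contract concrete, here is a minimal user-space sketch (not the kernel code itself) of the changed calling convention: one call returns the idle time and, through an optional out-parameter, the wall time taken from the same snapshot. The u64 typedef, the fake per-CPU counters, and get_cpu_idle_time_sketch() are illustration-only stand-ins, not the kernel's types or helpers.

/* Sketch of the new "idle plus matching wall time" contract. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint64_t u64;

/* Fake per-CPU counters standing in for kstat_cpu()/get_jiffies_64(). */
static u64 fake_wall[2] = { 1000, 1000 };
static u64 fake_busy[2] = {  400,  250 };

/* Mirrors the patched helper: the idle value is derived from the same
 * wall-time read that is handed back to the caller, so both values
 * describe the same instant. */
static u64 get_cpu_idle_time_sketch(unsigned int cpu, u64 *wall)
{
	u64 cur_wall_time = fake_wall[cpu];
	u64 idle_time = cur_wall_time - fake_busy[cpu];

	if (wall)
		*wall = cur_wall_time;
	return idle_time;
}

int main(void)
{
	u64 wall, idle;

	idle = get_cpu_idle_time_sketch(0, &wall);
	printf("cpu0: idle=%llu wall=%llu\n",
	       (unsigned long long)idle, (unsigned long long)wall);

	/* Passing NULL keeps the old "idle only" behaviour. */
	idle = get_cpu_idle_time_sketch(1, NULL);
	printf("cpu1: idle=%llu\n", (unsigned long long)idle);
	return 0;
}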
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 29
1 file changed, 15 insertions, 14 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 42fcb146ba2..b935092aab2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -94,13 +94,13 @@ static struct dbs_tuners {
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
 {
 	cputime64_t idle_time;
-	cputime64_t cur_jiffies;
+	cputime64_t cur_wall_time;
 	cputime64_t busy_time;
 
-	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
 	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
 			kstat_cpu(cpu).cpustat.system);
 
@@ -113,7 +113,10 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 				kstat_cpu(cpu).cpustat.nice);
 	}
 
-	idle_time = cputime64_sub(cur_jiffies, busy_time);
+	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	if (wall)
+		*wall = cur_wall_time;
+
 	return idle_time;
 }
 
@@ -277,8 +280,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
 		dbs_info = &per_cpu(cpu_dbs_info, j);
-		dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-		dbs_info->prev_cpu_wall = get_jiffies_64();
+		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&dbs_info->prev_cpu_wall);
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -368,21 +371,19 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		int freq_avg;
 
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
 		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
 				j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		cur_idle_time = get_cpu_idle_time(j);
 		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
 				j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-		if (unlikely(wall_time <= idle_time ||
-			(cputime_to_msecs(wall_time) <
-			dbs_tuners_ins.sampling_rate / (2 * 1000)))) {
+		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
-		}
 
 		load = 100 * (wall_time - idle_time) / wall_time;
 
@@ -531,8 +532,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-			j_dbs_info->prev_cpu_wall = get_jiffies_64();
+			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&j_dbs_info->prev_cpu_wall);
 		}
 		this_dbs_info->cpu = cpu;
 		/*
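
For reference, a hedged user-space sketch of the sampling step after this change (an illustration, not the governor code): one snapshot supplies both the idle and wall values, the deltas against the previous sample give the load, and the simplified guard from the patch skips empty or inconsistent intervals. sample_idle_and_wall(), check_cpu(), and struct cpu_sample are hypothetical stand-ins for the kernel's helpers and per-CPU state.

/* Sketch of the per-CPU sampling step using the paired idle/wall values. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct cpu_sample {
	u64 prev_cpu_idle;
	u64 prev_cpu_wall;
};

/* Hypothetical snapshot source; returns idle time and writes the
 * matching wall time, like the patched get_cpu_idle_time(). */
static u64 sample_idle_and_wall(u64 tick, u64 *wall)
{
	*wall = tick;
	return tick * 3 / 4;	/* pretend the CPU is 25% busy */
}

static void check_cpu(struct cpu_sample *s, u64 tick)
{
	u64 cur_wall_time, cur_idle_time;
	unsigned int wall_time, idle_time, load;

	cur_idle_time = sample_idle_and_wall(tick, &cur_wall_time);

	wall_time = (unsigned int)(cur_wall_time - s->prev_cpu_wall);
	s->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int)(cur_idle_time - s->prev_cpu_idle);
	s->prev_cpu_idle = cur_idle_time;

	/* Same shape as the patched guard: ignore empty or inconsistent
	 * intervals instead of comparing against the sampling rate. */
	if (!wall_time || wall_time < idle_time)
		return;

	load = 100 * (wall_time - idle_time) / wall_time;
	printf("tick %llu: load=%u%%\n", (unsigned long long)tick, load);
}

int main(void)
{
	struct cpu_sample s = { 0, 0 };
	u64 tick;

	for (tick = 100; tick <= 400; tick += 100)
		check_cpu(&s, tick);
	return 0;
}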