author    Venki Pallipadi <venkatesh.pallipadi@intel.com>    2007-06-20 14:26:24 -0700
committer Greg Kroah-Hartman <gregkh@suse.de>    2007-08-15 09:25:10 -0700
commit    42418d94efa05105ba5a467119d3864201425430 (patch)
tree      22462853523d272c772fb14d6387e83ff1581f2e
parent    fd2efeae63567dde934bb54772bb1b991275b04a (diff)
CPUFREQ: ondemand: fix tickless accounting and software coordination bug
With a tickless kernel and software coordination of P-states, ondemand can look at the wrong idle statistics. This can happen when ondemand sampling is running on CPU 0 and, due to software coordination, the sampling also looks at the utilization of CPU 1. If CPU 1 is in a tickless state at that moment, its idle statistics will not be up to date, and CPU 0 will think CPU 1 has been idle for less time than it actually has. This is resolved by looking at the busy times of the CPUs instead, which are accurate even with tickless, and using them to determine idle time in a roundabout way (total time - busy time).

Thanks to Arjan for originally reporting the ondemand bug on a Lenovo T61.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--    drivers/cpufreq/cpufreq_ondemand.c    25
1 file changed, 18 insertions(+), 7 deletions(-)
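The accounting idea behind the patch -- measure elapsed wall time with an independent clock and subtract accumulated busy time, rather than trusting the idle counter of a possibly tickless CPU -- can be illustrated outside the kernel. The following stand-alone C sketch is not part of the patch; it is only a userspace analogue that derives idle time for cpu0 from /proc/stat. The /proc/stat field order, the use of cpu0, and CLOCK_MONOTONIC as the independent clock are assumptions of the sketch, not anything the patch itself relies on.

/*
 * Stand-alone illustration (not kernel code): derive idle time as
 * "elapsed wall time minus busy time" instead of reading the idle
 * counter directly, mirroring what get_cpu_idle_time() below does
 * with get_jiffies_64() and the per-CPU busy cpustat fields.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Sum the "busy" fields of the cpu0 line in /proc/stat.  Assumes the
 * usual field order: user nice system idle iowait irq softirq steal. */
static unsigned long long cpu0_busy_ticks(void)
{
        char line[256];
        unsigned long long v[8] = { 0 };
        FILE *f = fopen("/proc/stat", "r");

        if (!f)
                return 0;
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "cpu0 %llu %llu %llu %llu %llu %llu %llu %llu",
                           &v[0], &v[1], &v[2], &v[3], &v[4],
                           &v[5], &v[6], &v[7]) == 8)
                        break;
        }
        fclose(f);
        /* Busy = user + nice + system + irq + softirq + steal
         * (nice counts as busy, like the patch with ignore_nice unset). */
        return v[0] + v[1] + v[2] + v[5] + v[6] + v[7];
}

int main(void)
{
        long tick = sysconf(_SC_CLK_TCK);
        struct timespec t0, t1;
        unsigned long long busy0, busy1, busy_delta, elapsed_ticks, idle_ticks;

        busy0 = cpu0_busy_ticks();
        clock_gettime(CLOCK_MONOTONIC, &t0);
        sleep(1);
        clock_gettime(CLOCK_MONOTONIC, &t1);
        busy1 = cpu0_busy_ticks();

        /* Independent measure of elapsed time, converted to ticks --
         * the userspace analogue of get_jiffies_64(). */
        double elapsed = (t1.tv_sec - t0.tv_sec) +
                         (t1.tv_nsec - t0.tv_nsec) / 1e9;
        elapsed_ticks = (unsigned long long)(elapsed * tick);

        busy_delta = busy1 - busy0;
        idle_ticks = elapsed_ticks > busy_delta ?
                     elapsed_ticks - busy_delta : 0;

        printf("elapsed %llu ticks, busy %llu ticks, derived idle %llu ticks\n",
               elapsed_ticks, busy_delta, idle_ticks);
        return 0;
}

Because the busy counters are advanced whenever a CPU actually does work, this derived idle figure stays meaningful even when the CPU's own idle accounting lags, which is exactly the tickless case the commit message describes.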
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8532bb79e5fc..b6b3e52c9f28 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -96,15 +96,25 @@ static struct dbs_tuners {
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-        cputime64_t retval;
+        cputime64_t idle_time;
+        cputime64_t cur_jiffies;
+        cputime64_t busy_time;
 
-        retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-                        kstat_cpu(cpu).cpustat.iowait);
+        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+                        kstat_cpu(cpu).cpustat.system);
 
-        if (dbs_tuners_ins.ignore_nice)
-                retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
 
-        return retval;
+        if (!dbs_tuners_ins.ignore_nice) {
+                busy_time = cputime64_add(busy_time,
+                                kstat_cpu(cpu).cpustat.nice);
+        }
+
+        idle_time = cputime64_sub(cur_jiffies, busy_time);
+        return idle_time;
 }
 
 /*
@@ -339,7 +349,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
         total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
                         this_dbs_info->prev_cpu_wall);
-        this_dbs_info->prev_cpu_wall = cur_jiffies;
+        this_dbs_info->prev_cpu_wall = get_jiffies_64();
+
         if (!total_ticks)
                 return;
         /*