author    Dave Jones <davej@redhat.com>  2005-05-31 19:03:49 -0700
committer Dave Jones <davej@redhat.com>  2005-05-31 19:03:49 -0700
commit    dac1c1a56279b4545a822ec7bc770003c233e546 (patch)
tree      61175f7534ae731b1eaa4b75a3410a447058b4dc /drivers/cpufreq
parent    1206aaac285904e3e3995eecbf4129b6555a8973 (diff)
[CPUFREQ] ondemand,conservative minor bug-fix and cleanup
[PATCH] [1/5] ondemand,conservative minor bug-fix and cleanup

Attached patch fixes some minor issues with Alexander's patch and does
related cleanup in both the ondemand and conservative governors.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
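The bulk of the cleanup is that the idle-time computation, previously
duplicated in four places per governor, is consolidated into a single
helper. For reference, this is the helper the patch adds to both
governors, annotated here with comments (kstat_cpu() is the per-CPU
statistics accessor the governors were already using):

    static inline unsigned int get_cpu_idle_time(unsigned int cpu)
    {
            /* Idle time is the sum of the idle and iowait tick
             * counters; nice ticks are counted as idle too unless
             * ignore_nice is set. */
            return kstat_cpu(cpu).cpustat.idle +
                    kstat_cpu(cpu).cpustat.iowait +
                    (!dbs_tuners_ins.ignore_nice ?
                            kstat_cpu(cpu).cpustat.nice : 0);
    }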
Diffstat (limited to 'drivers/cpufreq')
 -rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 53
 -rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     | 58
 2 files changed, 38 insertions(+), 73 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index dd2f5b2..3082a3f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -89,6 +89,15 @@ static struct dbs_tuners dbs_tuners_ins = {
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+ return kstat_cpu(cpu).cpustat.idle +
+ kstat_cpu(cpu).cpustat.iowait +
+ ( !dbs_tuners_ins.ignore_nice ?
+ kstat_cpu(cpu).cpustat.nice :
+ 0);
+}
+
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
@@ -221,16 +230,10 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_online_cpu(j) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
- j_dbs_info->cur_policy = policy;
-
- j_dbs_info->prev_cpu_idle_up =
- kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait +
- ( !dbs_tuners_ins.ignore_nice
- ? kstat_cpu(j).cpustat.nice : 0 );
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
}
up(&dbs_sem);
@@ -335,11 +338,7 @@ static void dbs_check_cpu(int cpu)
*/
/* Check for frequency increase */
- total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- /* consider 'nice' tasks as 'idle' time too if required */
- if (dbs_tuners_ins.ignore_nice == 0)
- total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
+ total_idle_ticks = get_cpu_idle_time(cpu);
idle_ticks = total_idle_ticks -
this_dbs_info->prev_cpu_idle_up;
this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -354,11 +353,7 @@ static void dbs_check_cpu(int cpu)
j_dbs_info = &per_cpu(cpu_dbs_info, j);
/* Check for frequency increase */
- total_idle_ticks = kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
- /* consider 'nice' too? */
- if (dbs_tuners_ins.ignore_nice == 0)
- total_idle_ticks += kstat_cpu(j).cpustat.nice;
+ total_idle_ticks = get_cpu_idle_time(j);
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_up;
j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -373,6 +368,8 @@ static void dbs_check_cpu(int cpu)
usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
if (idle_ticks < up_idle_ticks) {
+ down_skip[cpu] = 0;
+ this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
/* if we are already at full speed then break out early */
if (requested_freq[cpu] == policy->max)
return;
@@ -389,8 +386,6 @@ static void dbs_check_cpu(int cpu)
__cpufreq_driver_target(policy, requested_freq[cpu],
CPUFREQ_RELATION_H);
- down_skip[cpu] = 0;
- this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
return;
}
@@ -399,11 +394,7 @@ static void dbs_check_cpu(int cpu)
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
- total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- /* consider 'nice' too? */
- if (dbs_tuners_ins.ignore_nice == 0)
- total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
+ total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
idle_ticks = total_idle_ticks -
this_dbs_info->prev_cpu_idle_down;
this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -417,11 +408,7 @@ static void dbs_check_cpu(int cpu)
j_dbs_info = &per_cpu(cpu_dbs_info, j);
/* Check for frequency increase */
- total_idle_ticks = kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
- /* consider 'nice' too? */
- if (dbs_tuners_ins.ignore_nice == 0)
- total_idle_ticks += kstat_cpu(j).cpustat.nice;
+ total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_down;
j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -516,11 +503,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
- j_dbs_info->prev_cpu_idle_up =
- kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait +
- ( !dbs_tuners_ins.ignore_nice
- ? kstat_cpu(j).cpustat.nice : 0 );
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down
= j_dbs_info->prev_cpu_idle_up;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 0565916..26cf54b 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -88,6 +88,15 @@ static struct dbs_tuners dbs_tuners_ins = {
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+ return kstat_cpu(cpu).cpustat.idle +
+ kstat_cpu(cpu).cpustat.iowait +
+ ( !dbs_tuners_ins.ignore_nice ?
+ kstat_cpu(cpu).cpustat.nice :
+ 0);
+}
+
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
@@ -220,16 +229,10 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_online_cpu(j) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
- j_dbs_info->cur_policy = policy;
-
- j_dbs_info->prev_cpu_idle_up =
- kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait +
- ( !dbs_tuners_ins.ignore_nice
- ? kstat_cpu(j).cpustat.nice : 0 );
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
}
up(&dbs_sem);
@@ -322,15 +325,10 @@ static void dbs_check_cpu(int cpu)
*/
/* Check for frequency increase */
- total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- /* consider 'nice' tasks as 'idle' time too if required */
- if (dbs_tuners_ins.ignore_nice == 0)
- total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
+ total_idle_ticks = get_cpu_idle_time(cpu);
idle_ticks = total_idle_ticks -
this_dbs_info->prev_cpu_idle_up;
this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
for_each_cpu_mask(j, policy->cpus) {
unsigned int tmp_idle_ticks;
@@ -341,11 +339,7 @@ static void dbs_check_cpu(int cpu)
j_dbs_info = &per_cpu(cpu_dbs_info, j);
/* Check for frequency increase */
- total_idle_ticks = kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
- /* consider 'nice' too? */
- if (dbs_tuners_ins.ignore_nice == 0)
- total_idle_ticks += kstat_cpu(j).cpustat.nice;
+ total_idle_ticks = get_cpu_idle_time(j);
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_up;
j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -360,14 +354,14 @@ static void dbs_check_cpu(int cpu)
usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
if (idle_ticks < up_idle_ticks) {
+ down_skip[cpu] = 0;
+ this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
/* if we are already at full speed then break out early */
if (policy->cur == policy->max)
return;
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- down_skip[cpu] = 0;
- this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
return;
}
@@ -376,11 +370,7 @@ static void dbs_check_cpu(int cpu)
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
- total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- /* consider 'nice' too? */
- if (dbs_tuners_ins.ignore_nice == 0)
- total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
+ total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
idle_ticks = total_idle_ticks -
this_dbs_info->prev_cpu_idle_down;
this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -393,12 +383,8 @@ static void dbs_check_cpu(int cpu)
continue;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
- /* Check for frequency increase */
- total_idle_ticks = kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
- /* consider 'nice' too? */
- if (dbs_tuners_ins.ignore_nice == 0)
- total_idle_ticks += kstat_cpu(j).cpustat.nice;
+ /* Check for frequency decrease */
+ total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_down;
j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -414,7 +400,7 @@ static void dbs_check_cpu(int cpu)
freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
dbs_tuners_ins.sampling_down_factor;
down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
- usecs_to_jiffies(freq_down_sampling_rate);
+ usecs_to_jiffies(freq_down_sampling_rate);
if (idle_ticks > down_idle_ticks ) {
/* if we are already at the lowest speed then break out early
@@ -488,11 +474,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
- j_dbs_info->prev_cpu_idle_up =
- kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait +
- ( !dbs_tuners_ins.ignore_nice
- ? kstat_cpu(j).cpustat.nice : 0 );
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down
= j_dbs_info->prev_cpu_idle_up;
}
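For reference, the minor bug-fix: in dbs_check_cpu() the down_skip and
prev_cpu_idle_down updates are moved ahead of the early return that
fires when the CPU is already at full speed, so the frequency-decrease
bookkeeping is refreshed even when no transition is requested. A
condensed sketch of the resulting flow in the ondemand governor
(context abridged from the hunk above):

    if (idle_ticks < up_idle_ticks) {
            /* Refresh the decrease-path state first, so it stays
             * current even when the early return below is taken. */
            down_skip[cpu] = 0;
            this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

            /* if we are already at full speed then break out early */
            if (policy->cur == policy->max)
                    return;

            __cpufreq_driver_target(policy, policy->max,
                            CPUFREQ_RELATION_H);
            return;
    }

The frequency-decrease path is simplified in the same spirit: instead
of re-reading kstat_cpu(), it reuses the snapshot already stored in
prev_cpu_idle_up, so both paths work from the same sample within one
sampling interval.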