author     Dave Jones <davej@redhat.com>    2005-05-31 19:03:49 -0700
committer  Dave Jones <davej@redhat.com>    2005-05-31 19:03:49 -0700
commit     9c7d269b9b05440dd0fe92d96f4e5d7e73dd7238 (patch)
tree       4e4268cc4f075187135312d5243e24d3a4fcd155 /drivers/cpufreq
parent     790d76fa979f55bfc49a6901bb911778949b582d (diff)
[CPUFREQ] ondemand,conservative governor idle_tick clean-up
[PATCH] [3/5] ondemand,conservative governor idle_tick clean-up
Clean up the ondemand and conservative governors by factoring the idle-tick
measurement into a single loop over the CPUs in the policy.
Signed-off-by: Eric Piel <eric.piel@tremplin-utc.net>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  26
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c       26
2 files changed, 10 insertions, 42 deletions
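
The change boils down to one pattern, applied to both the frequency-increase and frequency-decrease paths in both governors: drop the separate measurement of the policy CPU, start idle_ticks at UINT_MAX, and let a single loop over policy->cpus keep the smallest per-CPU idle-tick delta. The following is a minimal user-space sketch of that pattern, not the governors' actual code: cpu_stat, stats, fake_get_cpu_idle_time() and NR_POLICY_CPUS are hypothetical stand-ins for the kernel's cpu_dbs_info_s, per_cpu(cpu_dbs_info, j), get_cpu_idle_time() and for_each_cpu_mask(j, policy->cpus), used only to keep the example self-contained.

#include <limits.h>
#include <stdio.h>

#define NR_POLICY_CPUS 4	/* stand-in for the CPUs in policy->cpus */

struct cpu_stat {
	unsigned int prev_cpu_idle_up;	/* idle-tick snapshot from the previous sample */
};

static struct cpu_stat stats[NR_POLICY_CPUS];

/* Stand-in for the kernel's get_cpu_idle_time(): returns cumulative idle ticks. */
static unsigned int fake_get_cpu_idle_time(int cpu)
{
	static unsigned int ticks[NR_POLICY_CPUS] = { 100, 140, 90, 200 };
	ticks[cpu] += 10u * (unsigned int)(cpu + 1);	/* pretend each CPU idled a bit more */
	return ticks[cpu];
}

/* The factored-out pattern: one loop over every CPU in the policy,
 * seeded with UINT_MAX, keeping the smallest idle-tick delta. */
static unsigned int min_idle_ticks(void)
{
	unsigned int idle_ticks = UINT_MAX;
	int j;

	for (j = 0; j < NR_POLICY_CPUS; j++) {
		unsigned int total_idle_ticks, tmp_idle_ticks;

		total_idle_ticks = fake_get_cpu_idle_time(j);
		tmp_idle_ticks = total_idle_ticks - stats[j].prev_cpu_idle_up;
		stats[j].prev_cpu_idle_up = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	return idle_ticks;
}

int main(void)
{
	min_idle_ticks();	/* first call only primes the per-CPU snapshots */
	printf("least idle CPU this sample: %u ticks idle\n", min_idle_ticks());
	return 0;
}

Seeding with UINT_MAX is what lets the "if (j == cpu) continue;" special case go away: the policy CPU becomes just another loop iteration, and idle_ticks ends up as the minimum idle time across all CPUs in the policy, which is exactly what the diff below does.
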
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c503ec1..e1df376 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -297,7 +297,6 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int total_idle_ticks;
 	unsigned int freq_step;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
@@ -338,19 +337,12 @@ static void dbs_check_cpu(int cpu)
 	 */
 
 	/* Check for frequency increase */
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		/* Check for frequency increase */
 		total_idle_ticks = get_cpu_idle_time(j);
@@ -400,20 +392,12 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
 		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_down;
@@ -432,7 +416,7 @@ static void dbs_check_cpu(int cpu)
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
 		usecs_to_jiffies(freq_down_sampling_rate);
 
-	if (idle_ticks > down_idle_ticks ) {
+	if (idle_ticks > down_idle_ticks) {
 		/* if we are already at the lowest speed then break out early
 		 * or if we 'cannot' reduce the speed as the user might want
 		 * freq_step to be zero */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f239545..0482bd4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -296,7 +296,6 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int total_idle_ticks;
 	unsigned int freq_down_step;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
@@ -325,20 +324,12 @@ static void dbs_check_cpu(int cpu)
 	 */
 
 	/* Check for frequency increase */
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
 		total_idle_ticks = get_cpu_idle_time(j);
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_up;
@@ -376,18 +367,11 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		/* Check for frequency decrease */
 		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
@@ -408,7 +392,7 @@ static void dbs_check_cpu(int cpu)
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
 		usecs_to_jiffies(freq_down_sampling_rate);
 
-	if (idle_ticks > down_idle_ticks ) {
+	if (idle_ticks > down_idle_ticks) {
 		/* if we are already at the lowest speed then break out early
 		 * or if we 'cannot' reduce the speed as the user might want
 		 * freq_step to be zero */