Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c | 180
1 file changed, 91 insertions(+), 89 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8d83a21..c1fc9c6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -34,13 +34,9 @@
  */
 #define DEF_FREQUENCY_UP_THRESHOLD	(80)
-#define MIN_FREQUENCY_UP_THRESHOLD	(0)
+#define MIN_FREQUENCY_UP_THRESHOLD	(11)
 #define MAX_FREQUENCY_UP_THRESHOLD	(100)
 
-#define DEF_FREQUENCY_DOWN_THRESHOLD	(20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD	(0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD	(100)
-
 /*
  * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
@@ -55,9 +51,9 @@ static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE		(def_sampling_rate / 2)
 #define MAX_SAMPLING_RATE		(500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
-#define DEF_SAMPLING_DOWN_FACTOR	(10)
+#define DEF_SAMPLING_DOWN_FACTOR	(1)
+#define MAX_SAMPLING_DOWN_FACTOR	(10)
 #define TRANSITION_LATENCY_LIMIT	(10 * 1000)
-#define sampling_rate_in_HZ(x) (((x * HZ) < (1000 * 1000))?1:((x * HZ) / (1000 * 1000)))
 
 static void do_dbs_timer(void *data);
 
@@ -78,15 +74,23 @@ struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
-	unsigned int down_threshold;
+	unsigned int ignore_nice;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 };
 
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+	return	kstat_cpu(cpu).cpustat.idle +
+		kstat_cpu(cpu).cpustat.iowait +
+		( !dbs_tuners_ins.ignore_nice ?
+		  kstat_cpu(cpu).cpustat.nice :
+		  0);
+}
+
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
@@ -115,7 +119,7 @@ static ssize_t show_##file_name					\
 show_one(sampling_rate, sampling_rate);
 show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
+show_one(ignore_nice, ignore_nice);
 
 static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
@@ -126,6 +130,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (ret != 1 )
 		return -EINVAL;
 
+	if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+		return -EINVAL;
+
 	down(&dbs_sem);
 	dbs_tuners_ins.sampling_down_factor = input;
 	up(&dbs_sem);
@@ -161,8 +168,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 
 	down(&dbs_sem);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
-			input < MIN_FREQUENCY_UP_THRESHOLD ||
-			input <= dbs_tuners_ins.down_threshold) {
+			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		up(&dbs_sem);
 		return -EINVAL;
 	}
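The new get_cpu_idle_time() helper above changes what the governor counts as idle: iowait always counts, and nice time counts too unless the new ignore_nice tunable is set. A stand-alone user-space sketch of the same arithmetic (the struct and all values below are illustrative, not part of the patch):

#include <stdio.h>

struct cpustat_sample {			/* mimics kstat_cpu(cpu).cpustat */
	unsigned int idle;
	unsigned int iowait;
	unsigned int nice;
};

static unsigned int idle_ticks(const struct cpustat_sample *s,
			       unsigned int ignore_nice)
{
	/* Same expression as the kernel helper: idle + iowait, plus
	 * nice time only when it is NOT being ignored. */
	return s->idle + s->iowait + (!ignore_nice ? s->nice : 0);
}

int main(void)
{
	struct cpustat_sample s = { .idle = 700, .iowait = 50, .nice = 100 };

	printf("ignore_nice=0: %u idle ticks\n", idle_ticks(&s, 0)); /* 850 */
	printf("ignore_nice=1: %u idle ticks\n", idle_ticks(&s, 1)); /* 750 */
	return 0;
}

With ignore_nice=1, time spent in niced tasks looks busy to the governor, so background batch work no longer holds the frequency down.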
@@ -173,22 +179,35 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
 		const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
+
+	unsigned int j;
+
 	ret = sscanf (buf, "%u", &input);
+	if ( ret != 1 )
+		return -EINVAL;
 
+	if ( input > 1 )
+		input = 1;
+
 	down(&dbs_sem);
-	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
-			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
-			input >= dbs_tuners_ins.up_threshold) {
+	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
 		up(&dbs_sem);
-		return -EINVAL;
+		return count;
 	}
+	dbs_tuners_ins.ignore_nice = input;
 
-	dbs_tuners_ins.down_threshold = input;
+	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+	for_each_online_cpu(j) {
+		struct cpu_dbs_info_s *j_dbs_info;
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+	}
 	up(&dbs_sem);
 
 	return count;
@@ -201,7 +220,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 define_one_rw(sampling_rate);
 define_one_rw(sampling_down_factor);
 define_one_rw(up_threshold);
-define_one_rw(down_threshold);
+define_one_rw(ignore_nice);
 
 static struct attribute * dbs_attributes[] = {
 	&sampling_rate_max.attr,
@@ -209,7 +228,7 @@ static struct attribute * dbs_attributes[] = {
 	&sampling_rate.attr,
 	&sampling_down_factor.attr,
 	&up_threshold.attr,
-	&down_threshold.attr,
+	&ignore_nice.attr,
 	NULL
 };
 
@@ -222,9 +241,8 @@ static struct attribute_group dbs_attr_group = {
 
 static void dbs_check_cpu(int cpu)
 {
-	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int total_idle_ticks;
-	unsigned int freq_down_step;
+	unsigned int idle_ticks, up_idle_ticks, total_ticks;
+	unsigned int freq_next;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
 	struct cpu_dbs_info_s *this_dbs_info;
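The subtle step in store_ignore_nice() above is the for_each_online_cpu() loop: toggling ignore_nice changes the definition of idle, so the saved prev_cpu_idle_up/down baselines must be re-read before the next sample. A stand-alone sketch of what an unsigned delta across the toggle would otherwise do (numbers made up for illustration):

#include <stdio.h>

int main(void)
{
	/* Baseline sampled while nice time still counted as idle ... */
	unsigned int prev = 700 /* idle */ + 50 /* iowait */ + 100 /* nice */;
	/* ... next sample taken after ignore_nice was set to 1, so nice
	 * time no longer contributes: */
	unsigned int now = 760 + 55;	/* idle + iowait only */

	/* Without re-baselining, the unsigned subtraction wraps around
	 * (now < prev), yielding a huge bogus idle delta instead of 65: */
	printf("bogus delta: %u\n", now - prev);
	return 0;
}

Re-reading the baselines under dbs_sem, as the patch does, keeps the next delta consistent with the new accounting.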
@@ -238,38 +256,25 @@ static void dbs_check_cpu(int cpu)
 	policy = this_dbs_info->cur_policy;
 
 	/*
-	 * The default safe range is 20% to 80%
-	 * Every sampling_rate, we check
-	 *	- If current idle time is less than 20%, then we try to
-	 *	  increase frequency
-	 * Every sampling_rate*sampling_down_factor, we check
-	 *	- If current idle time is more than 80%, then we try to
-	 *	  decrease frequency
+	 * Every sampling_rate, we check, if current idle time is less
+	 * than 20% (default), then we try to increase frequency
+	 * Every sampling_rate*sampling_down_factor, we look for a the lowest
+	 * frequency which can sustain the load while keeping idle time over
+	 * 30%. If such a frequency exist, we try to decrease to this frequency.
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
-	 * 5% of max_frequency
+	 * 5% (default) of current frequency
 	 */
 
 	/* Check for frequency increase */
-	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
-		kstat_cpu(cpu).cpustat.iowait;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
-		total_idle_ticks = kstat_cpu(j).cpustat.idle +
-			kstat_cpu(j).cpustat.iowait;
+		total_idle_ticks = get_cpu_idle_time(j);
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_up;
 		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -281,13 +286,23 @@ static void dbs_check_cpu(int cpu)
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
 	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-		sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
 	if (idle_ticks < up_idle_ticks) {
+		down_skip[cpu] = 0;
+		for_each_cpu_mask(j, policy->cpus) {
+			struct cpu_dbs_info_s *j_dbs_info;
+
+			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info->prev_cpu_idle_down =
+					j_dbs_info->prev_cpu_idle_up;
+		}
+		/* if we are already at full speed then break out early */
+		if (policy->cur == policy->max)
+			return;
+
 		__cpufreq_driver_target(policy, policy->max,
 			CPUFREQ_RELATION_H);
-		down_skip[cpu] = 0;
-		this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 		return;
 	}
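The increase path above reduces to one comparison: the smallest per-CPU idle delta in the policy (i.e. the busiest CPU), scaled by 100, against (100 - up_threshold) times the sampling interval in jiffies. A stand-alone sketch of that arithmetic (the jiffies and tick values are made up):

#include <stdio.h>

int main(void)
{
	unsigned int up_threshold = 80;		/* DEF_FREQUENCY_UP_THRESHOLD */
	unsigned int sampling_jiffies = 50;	/* usecs_to_jiffies(sampling_rate) */
	unsigned int idle_ticks = 8;		/* busiest CPU in the policy */

	/* Same scaling as the kernel code: idle_ticks * 100 versus
	 * (100 - up_threshold) * interval length in jiffies. */
	if (idle_ticks * 100 < (100 - up_threshold) * sampling_jiffies)
		printf("less than %u%% idle -> jump to policy->max\n",
		       100 - up_threshold);
	else
		printf("enough idle time, no increase\n");
	return 0;
}

Here 800 < 1000, so the governor would request the maximum frequency; any increase always goes straight to policy->max.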
@@ -296,23 +311,14 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
-		kstat_cpu(cpu).cpustat.iowait;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
-		total_idle_ticks = kstat_cpu(j).cpustat.idle +
-			kstat_cpu(j).cpustat.iowait;
+		/* Check for frequency decrease */
+		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_down;
 		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -321,38 +327,37 @@ static void dbs_check_cpu(int cpu)
 			idle_ticks = tmp_idle_ticks;
 	}
 
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
 	down_skip[cpu] = 0;
+	/* if we cannot reduce the frequency anymore, break out early */
+	if (policy->cur == policy->min)
+		return;
 
+	/* Compute how many ticks there are between two measurements */
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
-	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		sampling_rate_in_HZ(freq_down_sampling_rate);
+	total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
 
-	if (idle_ticks > down_idle_ticks ) {
-		freq_down_step = (5 * policy->max) / 100;
-
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_down_step == 0))
-			freq_down_step = 5;
+	/*
+	 * The optimal frequency is the frequency that is the lowest that
+	 * can support the current CPU usage without triggering the up
+	 * policy. To be safe, we focus 10 points under the threshold.
+	 */
+	freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
+	freq_next = (freq_next * policy->cur) /
+			(dbs_tuners_ins.up_threshold - 10);
 
-		__cpufreq_driver_target(policy,
-			policy->cur - freq_down_step,
-			CPUFREQ_RELATION_H);
-		return;
-	}
+	if (freq_next <= ((policy->cur * 95) / 100))
+		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
 }
 
 static void do_dbs_timer(void *data)
 {
 	int i;
 	down(&dbs_sem);
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			dbs_check_cpu(i);
+	for_each_online_cpu(i)
+		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
-			sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	up(&dbs_sem);
 }
 
@@ -360,7 +365,7 @@ static inline void dbs_timer_init(void)
 {
 	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
 	schedule_delayed_work(&dbs_work,
-			sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
 }
 
@@ -397,12 +402,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle_up =
-				kstat_cpu(j).cpustat.idle +
-				kstat_cpu(j).cpustat.iowait;
-			j_dbs_info->prev_cpu_idle_down =
-				kstat_cpu(j).cpustat.idle +
-				kstat_cpu(j).cpustat.iowait;
+			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+			j_dbs_info->prev_cpu_idle_down
+				= j_dbs_info->prev_cpu_idle_up;
 		}
 		this_dbs_info->enable = 1;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -422,6 +424,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			def_sampling_rate = (latency / 1000) *
 				DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
+			dbs_tuners_ins.ignore_nice = 0;
 
 			dbs_timer_init();
 		}
@@ -461,12 +464,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return 0;
 }
 
-struct cpufreq_governor cpufreq_gov_dbs = {
+static struct cpufreq_governor cpufreq_gov_dbs = {
 	.name		= "ondemand",
 	.governor	= cpufreq_governor_dbs,
 	.owner		= THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_dbs);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
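The decrease path above replaces the old fixed step of 5% of policy->max with a load-proportional target: estimate the load over the down-sampling window, then pick the lowest frequency that keeps that load about 10 points under up_threshold, transitioning only if it saves at least 5%. A stand-alone sketch of the freq_next arithmetic (all numbers made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int up_threshold = 80;	/* DEF_FREQUENCY_UP_THRESHOLD */
	unsigned int total_ticks = 500;	/* usecs_to_jiffies(rate * factor) */
	unsigned int idle_ticks = 400;	/* least-idle (busiest) CPU's delta */
	unsigned int cur = 2000000;	/* current frequency, in kHz */

	/* Load in percent over the window, then scale the current
	 * frequency so this load would sit at up_threshold - 10: */
	unsigned int load = ((total_ticks - idle_ticks) * 100) / total_ticks;
	unsigned int freq_next = (load * cur) / (up_threshold - 10);

	/* Only worth a transition if it saves at least 5%: */
	if (freq_next <= (cur * 95) / 100)
		printf("scale down to %u kHz (load %u%%)\n", freq_next, load);
	return 0;
}

With 20% load, freq_next comes out at 571428 kHz: low enough to sustain the load at roughly 70% busy, which is why CPUFREQ_RELATION_L (lowest frequency at or above the target) is the right relation here.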