Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq.c               | 74
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c  |  6
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c      | 18
-rw-r--r--   drivers/cpufreq/cpufreq_stats.c         | 54
4 files changed, 116 insertions, 36 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 109d62c..815902c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -4,6 +4,9 @@
  *  Copyright (C) 2001 Russell King
  *  (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  *
+ *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
+ *             Added handling for CPU hotplug
+ *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2 as
  *  published by the Free Software Foundation.
@@ -35,14 +38,6 @@ static struct cpufreq_driver *cpufreq_driver;
 static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
-
-/* we keep a copy of all ->add'ed CPU's struct sys_device here;
- * as it is only accessed in ->add and ->remove, no lock or reference
- * count is necessary.
- */
-static struct sys_device *cpu_sys_devices[NR_CPUS];
-
-
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static void handle_update(void *data);
@@ -574,6 +569,9 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	unsigned long flags;
 	unsigned int j;
 
+	if (cpu_is_offline(cpu))
+		return 0;
+
 	cpufreq_debug_disable_ratelimit();
 	dprintk("adding CPU %u\n", cpu);
@@ -582,7 +580,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	 * CPU because it is in the same boat. */
 	policy = cpufreq_cpu_get(cpu);
 	if (unlikely(policy)) {
-		cpu_sys_devices[cpu] = sys_dev;
 		dprintk("CPU already managed, adding link\n");
 		sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
 		cpufreq_debug_enable_ratelimit();
@@ -595,12 +592,11 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		goto module_out;
 	}
 
-	policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
+	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
 	if (!policy) {
 		ret = -ENOMEM;
 		goto nomem_out;
 	}
-	memset(policy, 0, sizeof(struct cpufreq_policy));
 
 	policy->cpu = cpu;
 	policy->cpus = cpumask_of_cpu(cpu);
@@ -657,7 +653,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	}
 
 	module_put(cpufreq_driver->owner);
-	cpu_sys_devices[cpu] = sys_dev;
 	dprintk("initialization complete\n");
 	cpufreq_debug_enable_ratelimit();
@@ -682,7 +677,7 @@ err_out:
 nomem_out:
 	module_put(cpufreq_driver->owner);
- module_out:
+module_out:
 	cpufreq_debug_enable_ratelimit();
 	return ret;
 }
@@ -699,6 +694,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	unsigned long flags;
 	struct cpufreq_policy *data;
 #ifdef CONFIG_SMP
+	struct sys_device *cpu_sys_dev;
 	unsigned int j;
 #endif
@@ -710,7 +706,6 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 
 	if (!data) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		cpu_sys_devices[cpu] = NULL;
 		cpufreq_debug_enable_ratelimit();
 		return -EINVAL;
 	}
@@ -725,14 +720,12 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 		dprintk("removing link\n");
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
-		cpu_sys_devices[cpu] = NULL;
 		cpufreq_cpu_put(data);
 		cpufreq_debug_enable_ratelimit();
 		return 0;
 	}
 #endif
 
-	cpu_sys_devices[cpu] = NULL;
 
 	if (!kobject_get(&data->kobj)) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -761,7 +754,8 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
-			sysfs_remove_link(&cpu_sys_devices[j]->kobj, "cpufreq");
+			cpu_sys_dev = get_cpu_sysdev(j);
+			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
 			cpufreq_cpu_put(data);
 		}
 	}
@@ -772,7 +766,6 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	down(&data->lock);
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-	cpufreq_driver->target = NULL;
 	up(&data->lock);
 
 	kobject_unregister(&data->kobj);
@@ -1119,17 +1112,19 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
 
+	lock_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
+	unlock_cpu_hotplug();
+
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
 
-
 int cpufreq_driver_target(struct cpufreq_policy *policy,
 			  unsigned int target_freq,
 			  unsigned int relation)
@@ -1416,6 +1411,45 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
+static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct cpufreq_policy *policy;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+
+	if (sys_dev) {
+		switch (action) {
+		case CPU_ONLINE:
+			cpufreq_add_dev(sys_dev);
+			break;
+		case CPU_DOWN_PREPARE:
+			/*
+			 * We attempt to put this cpu in lowest frequency
+			 * possible before going down. This will permit
+			 * hardware-managed P-State to switch other related
+			 * threads to min or higher speeds if possible.
+			 */
+			policy = cpufreq_cpu_data[cpu];
+			if (policy) {
+				cpufreq_driver_target(policy, policy->min,
+						CPUFREQ_RELATION_H);
+			}
+			break;
+		case CPU_DEAD:
+			cpufreq_remove_dev(sys_dev);
+			break;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_cpu_notifier =
+{
+	.notifier_call = cpufreq_cpu_callback,
+};
 
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
@@ -1476,6 +1510,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	}
 
 	if (!ret) {
+		register_cpu_notifier(&cpufreq_cpu_notifier);
 		dprintk("driver %s up and running\n", driver_data->name);
 		cpufreq_debug_enable_ratelimit();
 	}
@@ -1507,6 +1542,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	dprintk("unregistering driver %s\n", driver->name);
 
 	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+	unregister_cpu_notifier(&cpufreq_cpu_notifier);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e1df376..2ed5c43 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -315,9 +315,9 @@ static void dbs_check_cpu(int cpu)
 	policy = this_dbs_info->cur_policy;
 
 	if ( init_flag == 0 ) {
-		for ( /* NULL */; init_flag < NR_CPUS; init_flag++ ) {
-			dbs_info = &per_cpu(cpu_dbs_info, init_flag);
-			requested_freq[cpu] = dbs_info->cur_policy->cur;
+		for_each_online_cpu(j) {
+			dbs_info = &per_cpu(cpu_dbs_info, j);
+			requested_freq[j] = dbs_info->cur_policy->cur;
 		}
 		init_flag = 1;
 	}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c1fc9c6..1774111 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -48,7 +48,10 @@
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
-#define MIN_SAMPLING_RATE		(def_sampling_rate / 2)
+#define MIN_SAMPLING_RATE_RATIO		(2)
+/* for correct statistics, we need at least 10 ticks between each measure */
+#define MIN_STAT_SAMPLING_RATE		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE		(500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
 #define DEF_SAMPLING_DOWN_FACTOR	(1)
@@ -416,13 +419,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	if (dbs_enable == 1) {
 		unsigned int latency;
 		/* policy latency is in nS. Convert it to uS first */
+		latency = policy->cpuinfo.transition_latency / 1000;
+		if (latency == 0)
+			latency = 1;
 
-		latency = policy->cpuinfo.transition_latency;
-		if (latency < 1000)
-			latency = 1000;
-
-		def_sampling_rate = (latency / 1000) *
+		def_sampling_rate = latency *
 			DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
+
+		if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
+			def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+
 		dbs_tuners_ins.sampling_rate = def_sampling_rate;
 		dbs_tuners_ins.ignore_nice = 0;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 741b6b1..0bddb8e 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -19,6 +19,7 @@
 #include <linux/percpu.h>
 #include <linux/kobject.h>
 #include <linux/spinlock.h>
+#include <linux/notifier.h>
 #include <asm/cputime.h>
 
 static spinlock_t cpufreq_stats_lock;
@@ -192,11 +193,15 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
 	unsigned int cpu = policy->cpu;
 	if (cpufreq_stats_table[cpu])
 		return -EBUSY;
-	if ((stat = kmalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL)
+	if ((stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL)
 		return -ENOMEM;
-	memset(stat, 0, sizeof (struct cpufreq_stats));
 
 	data = cpufreq_cpu_get(cpu);
+	if (data == NULL) {
+		ret = -EINVAL;
+		goto error_get_fail;
+	}
+
 	if ((ret = sysfs_create_group(&data->kobj, &stats_attr_group)))
 		goto error_out;
@@ -216,12 +221,11 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
 	alloc_size += count * count * sizeof(int);
 #endif
 	stat->max_state = count;
-	stat->time_in_state = kmalloc(alloc_size, GFP_KERNEL);
+	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
 	if (!stat->time_in_state) {
 		ret = -ENOMEM;
 		goto error_out;
 	}
-	memset(stat->time_in_state, 0, alloc_size);
 	stat->freq_table = (unsigned int *)(stat->time_in_state + count);
 
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
@@ -244,6 +248,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
 	return 0;
 error_out:
 	cpufreq_cpu_put(data);
+error_get_fail:
 	kfree(stat);
 	cpufreq_stats_table[cpu] = NULL;
 	return ret;
@@ -298,6 +303,27 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
 	return 0;
 }
 
+static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+		cpufreq_update_policy(cpu);
+		break;
+	case CPU_DEAD:
+		cpufreq_stats_free_table(cpu);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_stat_cpu_notifier =
+{
+	.notifier_call = cpufreq_stat_cpu_callback,
+};
+
 static struct notifier_block notifier_policy_block = {
 	.notifier_call = cpufreq_stat_notifier_policy
 };
@@ -311,6 +337,7 @@ __init cpufreq_stats_init(void)
 {
 	int ret;
 	unsigned int cpu;
+
 	spin_lock_init(&cpufreq_stats_lock);
 	if ((ret = cpufreq_register_notifier(&notifier_policy_block,
 				CPUFREQ_POLICY_NOTIFIER)))
@@ -323,20 +350,31 @@ __init cpufreq_stats_init(void)
 		return ret;
 	}
 
-	for_each_cpu(cpu)
-		cpufreq_update_policy(cpu);
+	register_cpu_notifier(&cpufreq_stat_cpu_notifier);
+	lock_cpu_hotplug();
+	for_each_online_cpu(cpu) {
+		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
+			(void *)(long)cpu);
+	}
+	unlock_cpu_hotplug();
 	return 0;
 }
 
 static void __exit cpufreq_stats_exit(void)
 {
 	unsigned int cpu;
+
 	cpufreq_unregister_notifier(&notifier_policy_block,
 			CPUFREQ_POLICY_NOTIFIER);
 	cpufreq_unregister_notifier(&notifier_trans_block,
 			CPUFREQ_TRANSITION_NOTIFIER);
-	for_each_cpu(cpu)
-		cpufreq_stats_free_table(cpu);
+	unregister_cpu_notifier(&cpufreq_stat_cpu_notifier);
+	lock_cpu_hotplug();
+	for_each_online_cpu(cpu) {
+		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
+			(void *)(long)cpu);
+	}
+	unlock_cpu_hotplug();
 }
 
 MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");