From c120069779e3e35917c15393cf2847fa79811eb6 Mon Sep 17 00:00:00 2001
From: Dave Jones
Date: Mon, 5 Feb 2007 16:12:43 -0800
Subject: [CPUFREQ] Remove hotplug cpu crap

The hotplug CPU locking in cpufreq is horrendous.  No-one seems to care
enough to fix it, so just remove it so that the 99.9% of the real world
users of this code can use cpufreq without being bothered by warnings.

Signed-off-by: Andrew Morton
Signed-off-by: Dave Jones
---
 drivers/cpufreq/cpufreq.c              | 16 ----------------
 drivers/cpufreq/cpufreq_conservative.c |  2 --
 drivers/cpufreq/cpufreq_ondemand.c     |  2 --
 drivers/cpufreq/cpufreq_stats.c        |  2 --
 drivers/cpufreq/cpufreq_userspace.c    |  2 --
 5 files changed, 24 deletions(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a45cc89..9bdcdbd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -415,12 +415,10 @@ static ssize_t store_##file_name \
 	if (ret != 1) \
 		return -EINVAL; \
 \
-	lock_cpu_hotplug(); \
 	mutex_lock(&policy->lock); \
 	ret = __cpufreq_set_policy(policy, &new_policy); \
 	policy->user_policy.object = policy->object; \
 	mutex_unlock(&policy->lock); \
-	unlock_cpu_hotplug(); \
 \
 	return ret ? ret : count; \
 }
@@ -479,8 +477,6 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
 						&new_policy.governor))
 		return -EINVAL;
 
-	lock_cpu_hotplug();
-
 	/* Do not use cpufreq_set_policy here or the user_policy.max
 	   will be wrongly overridden */
 	mutex_lock(&policy->lock);
@@ -490,8 +486,6 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
 	policy->user_policy.governor = policy->governor;
 	mutex_unlock(&policy->lock);
 
-	unlock_cpu_hotplug();
-
 	if (ret)
 		return ret;
 	else
@@ -1278,7 +1272,6 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *********************************************************************/
 
 
-/* Must be called with lock_cpu_hotplug held */
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
@@ -1304,13 +1297,11 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
-	lock_cpu_hotplug();
 	mutex_lock(&policy->lock);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
 	mutex_unlock(&policy->lock);
-	unlock_cpu_hotplug();
 
 	cpufreq_cpu_put(policy);
 	return ret;
@@ -1338,7 +1329,6 @@ int cpufreq_driver_getavg(struct cpufreq_policy *policy)
 EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
 
 /*
- * Locking: Must be called with the lock_cpu_hotplug() lock held
  * when "event" is CPUFREQ_GOV_LIMITS
  */
 
@@ -1433,7 +1423,6 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 /*
  * data   : current policy.
  * policy : policy to be set.
- * Locking: Must be called with the lock_cpu_hotplug() lock held
  */
 static int __cpufreq_set_policy(struct cpufreq_policy *data,
 				struct cpufreq_policy *policy)
@@ -1539,8 +1528,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	if (!data)
 		return -EINVAL;
 
-	lock_cpu_hotplug();
-
 	/* lock this CPU */
 	mutex_lock(&data->lock);
 
@@ -1552,7 +1539,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 
 	mutex_unlock(&data->lock);
 
-	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 	return ret;
@@ -1576,7 +1562,6 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
-	lock_cpu_hotplug();
 	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
@@ -1603,7 +1588,6 @@ int cpufreq_update_policy(unsigned int cpu)
 	ret = __cpufreq_set_policy(data, &policy);
 
 	mutex_unlock(&data->lock);
-	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 	return ret;
 }
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index eef0270..787e841 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -430,14 +430,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(struct work_struct *work)
 {
 	int i;
-	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
-	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f697449..d52f9b4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -440,9 +440,7 @@ static void do_dbs_timer(struct work_struct *work)
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	if (!dbs_tuners_ins.powersave_bias ||
 	    sample_type == DBS_NORMAL_SAMPLE) {
-		lock_cpu_hotplug();
 		dbs_check_cpu(dbs_info);
-		unlock_cpu_hotplug();
 		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
 			dbs_info->sample_type = DBS_SUB_SAMPLE;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 91ad342..d1c7cac 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -370,12 +370,10 @@ __exit cpufreq_stats_exit(void)
 	cpufreq_unregister_notifier(&notifier_trans_block,
 			CPUFREQ_TRANSITION_NOTIFIER);
 	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-	lock_cpu_hotplug();
 	for_each_online_cpu(cpu) {
 		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
 						CPU_DEAD, (void *)(long)cpu);
 	}
-	unlock_cpu_hotplug();
 }
 
 MODULE_AUTHOR ("Zou Nan hai ");
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 2a4eb0b..860345c 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -71,7 +71,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
-	lock_cpu_hotplug();
 	mutex_lock(&userspace_mutex);
 	if (!cpu_is_managed[policy->cpu])
 		goto err;
@@ -94,7 +93,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
  err:
 	mutex_unlock(&userspace_mutex);
-	unlock_cpu_hotplug();
 	return ret;
 }
--
cgit v1.1
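A note on the shape of the locking removed above: every affected path took the
global hotplug lock first and a per-policy mutex second.  Condensed into one
illustrative function (simplified from the removed lines, not a verbatim
kernel excerpt), the old pattern was:

/*
 * Illustrative sketch only: the pattern deleted by this patch, a global
 * hotplug lock wrapped around the per-policy mutex on every store,
 * target and update path.
 */
static int set_policy_old_style(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	int ret;

	lock_cpu_hotplug();		/* global lock, taken first */
	mutex_lock(&policy->lock);	/* per-policy lock, taken second */
	ret = __cpufreq_set_policy(policy, new_policy);
	mutex_unlock(&policy->lock);
	unlock_cpu_hotplug();
	return ret;
}

Replicating that pairing across a dozen entry points is what made the scheme
hard to keep consistent; the patches that follow replace it wholesale.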
From 5a01f2e8f3ac134e24144d74bb48a60236f7024d Mon Sep 17 00:00:00 2001
From: Venkatesh Pallipadi
Date: Mon, 5 Feb 2007 16:12:44 -0800
Subject: [CPUFREQ] Rewrite lock in cpufreq to eliminate cpufreq/hotplug related
 issues

Yet another attempt to resolve cpufreq and hotplug locking issues.

Patchset has 3 patches:
* Rewrite the lock infrastructure of cpufreq using a per cpu rwsem.
* Minor restructuring of work callback in ondemand driver.
* Use the new cpufreq rwsem infrastructure in ondemand work.

This patch:

Convert policy->lock to rwsem and move it to per_cpu area.
This rwsem will protect against both changing/accessing policy related
parameters and CPU hot plug/unplug.

[malattia@linux.it: fix oops in kref_put()]
Cc: Gautham R Shenoy
Signed-off-by: Venkatesh Pallipadi
Cc: Gautham R Shenoy
Signed-off-by: Mattia Dongili
Signed-off-by: Andrew Morton
Signed-off-by: Dave Jones
---
 drivers/cpufreq/cpufreq.c | 244 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 181 insertions(+), 63 deletions(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9bdcdbd..f52facc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,8 +41,67 @@ static struct cpufreq_driver *cpufreq_driver;
 static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
+/*
+ * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
+ * all cpufreq/hotplug/workqueue/etc related lock issues.
+ *
+ * The rules for this semaphore:
+ * - Any routine that wants to read from the policy structure will
+ *   do a down_read on this semaphore.
+ * - Any routine that will write to the policy structure and/or may take away
+ *   the policy altogether (eg. CPU hotplug), will hold this lock in write
+ *   mode before doing so.
+ *
+ * Additional rules:
+ * - All holders of the lock should check to make sure that the CPU they
+ *   are concerned with are online after they get the lock.
+ * - Governor routines that can be called in cpufreq hotplug path should not
+ *   take this sem as top level hotplug notifier handler takes this.
+ */
+static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
+
+#define lock_policy_rwsem(mode, cpu)					\
+int lock_policy_rwsem_##mode						\
+(int cpu)								\
+{									\
+	int policy_cpu = per_cpu(policy_cpu, cpu);			\
+	BUG_ON(policy_cpu == -1);					\
+	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
+	if (unlikely(!cpu_online(cpu))) {				\
+		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
+		return -1;						\
+	}								\
+									\
+	return 0;							\
+}
+
+lock_policy_rwsem(read, cpu);
+EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
+
+lock_policy_rwsem(write, cpu);
+EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
+
+void unlock_policy_rwsem_read(int cpu)
+{
+	int policy_cpu = per_cpu(policy_cpu, cpu);
+	BUG_ON(policy_cpu == -1);
+	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
+}
+EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
+
+void unlock_policy_rwsem_write(int cpu)
+{
+	int policy_cpu = per_cpu(policy_cpu, cpu);
+	BUG_ON(policy_cpu == -1);
+	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+}
+EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
+
+
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static unsigned int __cpufreq_get(unsigned int cpu);
 static void handle_update(struct work_struct *work);
 
 /**
@@ -415,10 +474,8 @@ static ssize_t store_##file_name \
 	if (ret != 1) \
 		return -EINVAL; \
 \
-	mutex_lock(&policy->lock); \
 	ret = __cpufreq_set_policy(policy, &new_policy); \
 	policy->user_policy.object = policy->object; \
-	mutex_unlock(&policy->lock); \
 \
 	return ret ? ret : count; \
 }
@@ -432,7 +489,7 @@ store_one(scaling_max_freq,max);
 static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
 							char *buf)
 {
-	unsigned int cur_freq = cpufreq_get(policy->cpu);
+	unsigned int cur_freq = __cpufreq_get(policy->cpu);
 	if (!cur_freq)
 		return sprintf(buf, "<unknown>");
 	return sprintf(buf, "%u\n", cur_freq);
@@ -479,12 +536,10 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
 
 	/* Do not use cpufreq_set_policy here or the user_policy.max
 	   will be wrongly overridden */
-	mutex_lock(&policy->lock);
 	ret = __cpufreq_set_policy(policy, &new_policy);
 
 	policy->user_policy.policy = policy->policy;
 	policy->user_policy.governor = policy->governor;
-	mutex_unlock(&policy->lock);
 
 	if (ret)
 		return ret;
@@ -589,11 +644,17 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
 		return -EINVAL;
+
+	if (lock_policy_rwsem_read(policy->cpu) < 0)
+		return -EINVAL;
+
 	if (fattr->show)
 		ret = fattr->show(policy, buf);
 	else
 		ret = -EIO;
 
+	unlock_policy_rwsem_read(policy->cpu);
+
 	cpufreq_cpu_put(policy);
 	return ret;
 }
@@ -607,11 +668,17 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
 		return -EINVAL;
+
+	if (lock_policy_rwsem_write(policy->cpu) < 0)
+		return -EINVAL;
+
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
+	unlock_policy_rwsem_write(policy->cpu);
+
 	cpufreq_cpu_put(policy);
 	return ret;
 }
@@ -685,8 +752,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	policy->cpu = cpu;
 	policy->cpus = cpumask_of_cpu(cpu);
 
-	mutex_init(&policy->lock);
-	mutex_lock(&policy->lock);
+	/* Initially set CPU itself as the policy_cpu */
+	per_cpu(policy_cpu, cpu) = cpu;
+	lock_policy_rwsem_write(cpu);
+
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
 
@@ -696,7 +765,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		dprintk("initialization failed\n");
-		mutex_unlock(&policy->lock);
+		unlock_policy_rwsem_write(cpu);
 		goto err_out;
 	}
 
@@ -710,6 +779,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		 */
 		managed_policy = cpufreq_cpu_get(j);
 		if (unlikely(managed_policy)) {
+
+			/* Set proper policy_cpu */
+			unlock_policy_rwsem_write(cpu);
+			per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+
+			if (lock_policy_rwsem_write(cpu) < 0)
+				goto err_out_driver_exit;
+
 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
 			managed_policy->cpus = policy->cpus;
 			cpufreq_cpu_data[cpu] = managed_policy;
@@ -720,13 +797,13 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 						&managed_policy->kobj,
 						"cpufreq");
 			if (ret) {
-				mutex_unlock(&policy->lock);
+				unlock_policy_rwsem_write(cpu);
 				goto err_out_driver_exit;
 			}
 
 			cpufreq_debug_enable_ratelimit();
-			mutex_unlock(&policy->lock);
 			ret = 0;
+			unlock_policy_rwsem_write(cpu);
 			goto err_out_driver_exit; /* call driver->exit() */
 		}
 	}
@@ -740,7 +817,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 
 	ret = kobject_register(&policy->kobj);
 	if (ret) {
-		mutex_unlock(&policy->lock);
+		unlock_policy_rwsem_write(cpu);
 		goto err_out_driver_exit;
 	}
 	/* set up files for this cpu device */
@@ -755,8 +832,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask(j, policy->cpus)
+	for_each_cpu_mask(j, policy->cpus) {
 		cpufreq_cpu_data[j] = policy;
+		per_cpu(policy_cpu, j) = policy->cpu;
+	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/* symlink affected CPUs */
@@ -772,14 +851,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
 					"cpufreq");
 		if (ret) {
-			mutex_unlock(&policy->lock);
+			unlock_policy_rwsem_write(cpu);
 			goto err_out_unregister;
 		}
 	}
 
 	policy->governor = NULL; /* to assure that the starting sequence is
 				  * run in cpufreq_set_policy */
-	mutex_unlock(&policy->lock);
+	unlock_policy_rwsem_write(cpu);
 
 	/* set default policy */
 	ret = cpufreq_set_policy(&new_policy);
@@ -820,11 +899,13 @@ module_out:
 
 
 /**
- * cpufreq_remove_dev - remove a CPU device
+ * __cpufreq_remove_dev - remove a CPU device
  *
  * Removes the cpufreq interface for a CPU device.
+ * Caller should already have policy_rwsem in write mode for this CPU.
+ * This routine frees the rwsem before returning.
  */
-static int cpufreq_remove_dev (struct sys_device * sys_dev)
+static int __cpufreq_remove_dev (struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long flags;
@@ -843,6 +924,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	if (!data) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		cpufreq_debug_enable_ratelimit();
+		unlock_policy_rwsem_write(cpu);
 		return -EINVAL;
 	}
 	cpufreq_cpu_data[cpu] = NULL;
@@ -859,6 +941,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
 		cpufreq_cpu_put(data);
 		cpufreq_debug_enable_ratelimit();
+		unlock_policy_rwsem_write(cpu);
 		return 0;
 	}
 #endif
@@ -867,6 +950,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	if (!kobject_get(&data->kobj)) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		cpufreq_debug_enable_ratelimit();
+		unlock_policy_rwsem_write(cpu);
 		return -EFAULT;
 	}
 
@@ -900,10 +984,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	mutex_lock(&data->lock);
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-	mutex_unlock(&data->lock);
+
+	unlock_policy_rwsem_write(cpu);
 
 	kobject_unregister(&data->kobj);
 
@@ -927,6 +1011,18 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
+static int cpufreq_remove_dev (struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	int retval;
+
+	if (unlikely(lock_policy_rwsem_write(cpu)))
+		BUG();
+
+	retval = __cpufreq_remove_dev(sys_dev);
+	return retval;
+}
+
+
 static void handle_update(struct work_struct *work)
 {
 	struct cpufreq_policy *policy =
@@ -974,9 +1070,12 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 	unsigned int ret_freq = 0;
 
 	if (policy) {
-		mutex_lock(&policy->lock);
+		if (unlikely(lock_policy_rwsem_read(cpu)))
+			return ret_freq;
+
 		ret_freq = policy->cur;
-		mutex_unlock(&policy->lock);
+
+		unlock_policy_rwsem_read(cpu);
 		cpufreq_cpu_put(policy);
 	}
 
@@ -985,24 +1084,13 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_quick_get);
 
 
-/**
- * cpufreq_get - get the current CPU frequency (in kHz)
- * @cpu: CPU number
- *
- * Get the CPU current (static) CPU frequency
- */
-unsigned int cpufreq_get(unsigned int cpu)
+static unsigned int __cpufreq_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cpufreq_policy *policy = cpufreq_cpu_data[cpu];
 	unsigned int ret_freq = 0;
 
-	if (!policy)
-		return 0;
-
 	if (!cpufreq_driver->get)
-		goto out;
-
-	mutex_lock(&policy->lock);
+		return (ret_freq);
 
 	ret_freq = cpufreq_driver->get(cpu);
 
@@ -1016,11 +1104,33 @@ unsigned int cpufreq_get(unsigned int cpu)
 		}
 	}
 
-	mutex_unlock(&policy->lock);
+	return (ret_freq);
+}
 
-out:
-	cpufreq_cpu_put(policy);
+/**
+ * cpufreq_get - get the current CPU frequency (in kHz)
+ * @cpu: CPU number
+ *
+ * Get the CPU current (static) CPU frequency
+ */
+unsigned int cpufreq_get(unsigned int cpu)
+{
+	unsigned int ret_freq = 0;
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+	if (!policy)
+		goto out;
+
+	if (unlikely(lock_policy_rwsem_read(cpu)))
+		goto out_policy;
+
+	ret_freq = __cpufreq_get(cpu);
+
+	unlock_policy_rwsem_read(cpu);
+
+out_policy:
+	cpufreq_cpu_put(policy);
+
+out:
 	return (ret_freq);
 }
 EXPORT_SYMBOL(cpufreq_get);
@@ -1297,18 +1407,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
-	mutex_lock(&policy->lock);
+	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
+		return -EINVAL;
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	mutex_unlock(&policy->lock);
+	unlock_policy_rwsem_write(policy->cpu);
 
 	cpufreq_cpu_put(policy);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
 {
 	int ret = 0;
 
@@ -1316,17 +1427,13 @@ int cpufreq_driver_getavg(struct cpufreq_policy *policy)
 	if (!policy)
 		return -EINVAL;
 
-	mutex_lock(&policy->lock);
-
 	if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
 		ret = cpufreq_driver->getavg(policy->cpu);
 
-	mutex_unlock(&policy->lock);
-
 	cpufreq_cpu_put(policy);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
+EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
 
 /*
  * when "event" is CPUFREQ_GOV_LIMITS
@@ -1410,9 +1517,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 	if (!cpu_policy)
 		return -EINVAL;
 
-	mutex_lock(&cpu_policy->lock);
 	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
-	mutex_unlock(&cpu_policy->lock);
 
 	cpufreq_cpu_put(cpu_policy);
 	return 0;
@@ -1528,8 +1633,9 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	if (!data)
 		return -EINVAL;
 
-	/* lock this CPU */
-	mutex_lock(&data->lock);
+	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
+		return -EINVAL;
+
 
 	ret = __cpufreq_set_policy(data, policy);
 	data->user_policy.min = data->min;
@@ -1537,7 +1643,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.policy = data->policy;
 	data->user_policy.governor = data->governor;
 
-	mutex_unlock(&data->lock);
+	unlock_policy_rwsem_write(policy->cpu);
 
 	cpufreq_cpu_put(data);
 
@@ -1562,7 +1668,8 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
-	mutex_lock(&data->lock);
+	if (unlikely(lock_policy_rwsem_write(cpu)))
+		return -EINVAL;
 
 	dprintk("updating policy for CPU %u\n", cpu);
 	memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1587,7 +1694,8 @@ int cpufreq_update_policy(unsigned int cpu)
 
 	ret = __cpufreq_set_policy(data, &policy);
 
-	mutex_unlock(&data->lock);
+	unlock_policy_rwsem_write(cpu);
+
 	cpufreq_cpu_put(data);
 	return ret;
 }
@@ -1597,31 +1705,28 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct cpufreq_policy *policy;
 	struct sys_device *sys_dev;
+	struct cpufreq_policy *policy;
 
 	sys_dev = get_cpu_sysdev(cpu);
-
 	if (sys_dev) {
 		switch (action) {
 		case CPU_ONLINE:
 			cpufreq_add_dev(sys_dev);
 			break;
 		case CPU_DOWN_PREPARE:
-			/*
-			 * We attempt to put this cpu in lowest frequency
-			 * possible before going down. This will permit
-			 * hardware-managed P-State to switch other related
-			 * threads to min or higher speeds if possible.
-			 */
+			if (unlikely(lock_policy_rwsem_write(cpu)))
+				BUG();
+
 			policy = cpufreq_cpu_data[cpu];
 			if (policy) {
-				cpufreq_driver_target(policy, policy->min,
+				__cpufreq_driver_target(policy, policy->min,
 						CPUFREQ_RELATION_H);
 			}
+			__cpufreq_remove_dev(sys_dev);
 			break;
-		case CPU_DEAD:
-			cpufreq_remove_dev(sys_dev);
+		case CPU_DOWN_FAILED:
+			cpufreq_add_dev(sys_dev);
 			break;
 		}
 	}
@@ -1735,3 +1840,16 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+
+static int __init cpufreq_core_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(policy_cpu, cpu) = -1;
+		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
+	}
+	return 0;
+}
+
+core_initcall(cpufreq_core_init);
--
cgit v1.1
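Taken together, the new helpers give readers and writers of a policy the
calling pattern sketched below.  This is an illustrative consumer written for
this note, not code from the patch; the subtlety it highlights is that a
negative return from lock_policy_rwsem_read() means the CPU went offline and
the semaphore has already been released inside the helper, so the caller must
not call the matching unlock in that case.

/* Illustrative caller of the per-CPU policy rwsem API (not from the patch). */
static unsigned int read_cur_freq(unsigned int cpu)
{
	unsigned int freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	if (lock_policy_rwsem_read(cpu) < 0) {
		/* CPU went away; the sem was already dropped in the helper */
		cpufreq_cpu_put(policy);
		return 0;
	}

	freq = policy->cur;	/* policy fields are stable under the sem */

	unlock_policy_rwsem_read(cpu);
	cpufreq_cpu_put(policy);
	return freq;
}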
From 529af7a14f04f92213bac371931a2b2b060c63fa Mon Sep 17 00:00:00 2001
From: Venkatesh Pallipadi
Date: Mon, 5 Feb 2007 16:12:44 -0800
Subject: [CPUFREQ] ondemand governor restructure the work callback

Restructure the delayed_work callback in ondemand.  This eliminates the
need for smp_processor_id in the callback function, helps with proper
locking, and allows flush_workqueue to be avoided when stopping the
governor (done in a subsequent patch).

Signed-off-by: Venkatesh Pallipadi
Cc: Gautham R Shenoy
Signed-off-by: Andrew Morton
Signed-off-by: Dave Jones
---
 drivers/cpufreq/cpufreq_ondemand.c | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d52f9b4..a480834 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 /* Sampling types */
-enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
+enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
-	enum dbs_sample sample_type;
-	unsigned int enable;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
+	int cpu;
+	unsigned int enable:1,
+		     sample_type:1;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	if (load < (dbs_tuners_ins.up_threshold - 10)) {
 		unsigned int freq_next, freq_cur;
 
-		freq_cur = cpufreq_driver_getavg(policy);
+		freq_cur = __cpufreq_driver_getavg(policy);
 		if (!freq_cur)
 			freq_cur = policy->cur;
 
@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 static void do_dbs_timer(struct work_struct *work)
 {
-	unsigned int cpu = smp_processor_id();
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-	enum dbs_sample sample_type = dbs_info->sample_type;
+	struct cpu_dbs_info_s *dbs_info =
+		container_of(work, struct cpu_dbs_info_s, work.work);
+	unsigned int cpu = dbs_info->cpu;
+	int sample_type = dbs_info->sample_type;
+
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
@@ -454,17 +457,17 @@ static void do_dbs_timer(struct work_struct *work)
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
-static inline void dbs_timer_init(unsigned int cpu)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
 	ondemand_powersave_bias_init();
-	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
+			      delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -528,6 +531,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
 			j_dbs_info->prev_cpu_wall = get_jiffies_64();
 		}
+		this_dbs_info->cpu = cpu;
 		this_dbs_info->enable = 1;
 		/*
 		 * Start the timerschedule work, when this governor
@@ -548,7 +552,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 		}
 
-		dbs_timer_init(policy->cpu);
+		dbs_timer_init(this_dbs_info);
 
 		mutex_unlock(&dbs_mutex);
 		break;
--
cgit v1.1
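The container_of() conversion above is the load-bearing change: once the
delayed_work is embedded in the per-CPU info structure, the callback recovers
its owning structure from the work pointer instead of calling
smp_processor_id(), so it stays correct even if it ever runs on an unexpected
CPU.  A minimal, generic sketch of the idiom (written for this note, not the
ondemand code itself):

#include <linux/workqueue.h>

struct sample_info {
	int cpu;			/* identity travels with the data */
	struct delayed_work work;	/* embedded in the owning struct */
};

static void sample_fn(struct work_struct *work)
{
	/* Recover the owning structure from the embedded work member. */
	struct sample_info *info =
		container_of(work, struct sample_info, work.work);

	pr_debug("sampling for cpu %d\n", info->cpu);
}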
From 56463b78cdca8e9ff8cc1759bca0c0777a061d6b Mon Sep 17 00:00:00 2001
From: Venkatesh Pallipadi
Date: Mon, 5 Feb 2007 16:12:45 -0800
Subject: [CPUFREQ] ondemand governor use new cpufreq rwsem locking in work
 callback

Eliminate flush_workqueue in the cpufreq_governor(STOP) callpath.  Using
flush there has deadlock potential, as in
http://uwsg.iu.edu/hypermail/linux/kernel/0611.3/1223.html

Also, clean up the locking issues with the do_dbs_timer delayed_work
callback.  As it changes the CPU frequency using __cpufreq_target, it needs
to hold policy_rwsem in write mode, which also protects it from hot plug.
Signed-off-by: Venkatesh Pallipadi
Cc: Gautham R Shenoy
Signed-off-by: Andrew Morton
Signed-off-by: Dave Jones
---
 drivers/cpufreq/cpufreq_ondemand.c | 34 ++++++++++++++++------------------
 1 file changed, 16 insertions(+), 18 deletions(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a480834..bcf8bb8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -437,8 +437,14 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (!dbs_info->enable)
+	if (lock_policy_rwsem_write(cpu) < 0)
 		return;
+
+	if (!dbs_info->enable) {
+		unlock_policy_rwsem_write(cpu);
+		return;
+	}
+
 	/* Common NORMAL_SAMPLE setup */
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	if (!dbs_tuners_ins.powersave_bias ||
@@ -455,6 +461,7 @@ static void do_dbs_timer(struct work_struct *work)
 					CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+	unlock_policy_rwsem_write(cpu);
 }
 
 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -463,6 +470,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
+	dbs_info->enable = 1;
 	ondemand_powersave_bias_init();
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
@@ -474,7 +482,6 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
 	dbs_info->enable = 0;
 	cancel_delayed_work(&dbs_info->work);
-	flush_workqueue(kondemand_wq);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -503,21 +510,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		mutex_lock(&dbs_mutex);
 		dbs_enable++;
-		if (dbs_enable == 1) {
-			kondemand_wq = create_workqueue("kondemand");
-			if (!kondemand_wq) {
-				printk(KERN_ERR
-					"Creation of kondemand failed\n");
-				dbs_enable--;
-				mutex_unlock(&dbs_mutex);
-				return -ENOSPC;
-			}
-		}
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		if (rc) {
-			if (dbs_enable == 1)
-				destroy_workqueue(kondemand_wq);
 			dbs_enable--;
 			mutex_unlock(&dbs_mutex);
 			return rc;
@@ -532,7 +527,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_wall = get_jiffies_64();
 		}
 		this_dbs_info->cpu = cpu;
-		this_dbs_info->enable = 1;
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
@@ -562,9 +556,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
-		if (dbs_enable == 0)
-			destroy_workqueue(kondemand_wq);
-
 		mutex_unlock(&dbs_mutex);
 
 		break;
@@ -593,12 +584,18 @@ static struct cpufreq_governor cpufreq_gov_dbs = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
+	kondemand_wq = create_workqueue("kondemand");
+	if (!kondemand_wq) {
+		printk(KERN_ERR "Creation of kondemand failed\n");
+		return -EFAULT;
+	}
 	return cpufreq_register_governor(&cpufreq_gov_dbs);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
+	destroy_workqueue(kondemand_wq);
 }
 
 
@@ -610,3 +607,4 @@ MODULE_LICENSE("GPL");
 
 module_init(cpufreq_gov_dbs_init);
 module_exit(cpufreq_gov_dbs_exit);
+
--
cgit v1.1
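Why cancel rather than flush: the work callback now takes the policy rwsem in
write mode, while the governor STOP path can be reached from a context that
already holds that rwsem.  A flush_workqueue() there would wait for a callback
that is itself waiting for the stopper's lock — the deadlock linked above.
Clearing enable before cancelling lets an already-running callback observe the
flag under the rwsem and return without re-queueing itself, so nothing is left
to flush.  Condensed from the diff above, the teardown is:

/* Condensed from the patch: governor teardown without flush_workqueue(). */
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;			/* callback re-checks this */
	cancel_delayed_work(&dbs_info->work);	/* drop a pending instance */
	/* no flush: a running instance sees enable == 0 and bails out */
}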
From f0ec313a89a7377f440c815f82b0370bd67f62c6 Mon Sep 17 00:00:00 2001
From: Adrian Bunk
Date: Mon, 5 Feb 2007 16:12:45 -0800
Subject: [CPUFREQ] CPU_FREQ_TABLE shouldn't be a def_tristate

CPU_FREQ_TABLE enables helper code and gets select'ed when it's required.
Building it as a module when it's not required doesn't seem to make much
sense.

Signed-off-by: Adrian Bunk
Signed-off-by: Andrew Morton
Signed-off-by: Dave Jones
---
 drivers/cpufreq/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 491779a..d155e81 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -16,7 +16,7 @@ config CPU_FREQ
 if CPU_FREQ
 
 config CPU_FREQ_TABLE
-	def_tristate m
+	tristate
 
 config CPU_FREQ_DEBUG
 	bool "Enable CPUfreq debugging"
--
cgit v1.1

From c18a1483f478adbeb4cc7148db22c4a9c10aaee3 Mon Sep 17 00:00:00 2001
From: Dave Jones
Date: Sat, 10 Feb 2007 20:03:51 -0500
Subject: [CPUFREQ] Whitespace fixup

Signed-off-by: Dave Jones
---
 drivers/cpufreq/cpufreq_ondemand.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bcf8bb8..d60bcb9 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -470,7 +470,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
-       dbs_info->enable = 1;
+	dbs_info->enable = 1;
 	ondemand_powersave_bias_init();
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
--
cgit v1.1