Diffstat (limited to 'drivers/cpufreq')
 drivers/cpufreq/Kconfig.arm          |  15
 drivers/cpufreq/Makefile             |   2
 drivers/cpufreq/cpufreq.c            |   3
 drivers/cpufreq/cpufreq_ondemand.c   |   3
 drivers/cpufreq/cpufreq_userspace.c  |   8
 drivers/cpufreq/exynos-cpufreq.c     | 290
 drivers/cpufreq/exynos4210-cpufreq.c | 643
 drivers/cpufreq/omap-cpufreq.c       | 274
 drivers/cpufreq/powernow-k8.c        |  47
 drivers/cpufreq/s3c64xx-cpufreq.c    |  35
10 files changed, 765 insertions(+), 555 deletions(-)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 72a0044..e0664fe 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -21,12 +21,19 @@ config ARM_S5PV210_CPUFREQ If in doubt, say N. +config ARM_EXYNOS_CPUFREQ + bool "SAMSUNG EXYNOS SoCs" + depends on ARCH_EXYNOS + select ARM_EXYNOS4210_CPUFREQ if CPU_EXYNOS4210 + default y + help + This adds the CPUFreq driver common part for Samsung + EXYNOS SoCs. + + If in doubt, say N. + config ARM_EXYNOS4210_CPUFREQ bool "Samsung EXYNOS4210" - depends on CPU_EXYNOS4210 - default y help This adds the CPUFreq driver for Samsung EXYNOS4210 SoC (S5PV310 or S5PC210). - - If in doubt, say N. diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index a48bc02..ac000fa 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -42,7 +42,9 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o +obj-$(CONFIG_ARCH_OMAP2PLUS) += omap-cpufreq.o ################################################################################## # PowerPC platform drivers diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 8c2df34..622013f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -204,8 +204,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) pr_debug("saving %lu as reference value for loops_per_jiffy; " "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); } - if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || - (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) || + if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) || (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 3d679ee..c3e0652 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -713,11 +713,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, static int __init cpufreq_gov_dbs_init(void) { - cputime64_t wall; u64 idle_time; int cpu = get_cpu(); - idle_time = get_cpu_idle_time_us(cpu, &wall); + idle_time = get_cpu_idle_time_us(cpu, NULL); put_cpu(); if (idle_time != -1ULL) { /* Idle micro accounting is supported. Use finer thresholds */ diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index f231015..bedac1a 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -47,9 +47,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (!per_cpu(cpu_is_managed, freq->cpu)) return 0; - pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", - freq->cpu, freq->new); - per_cpu(cpu_cur_freq, freq->cpu) = freq->new; + if (val == CPUFREQ_POSTCHANGE) { + pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", + freq->cpu, freq->new); + per_cpu(cpu_cur_freq, freq->cpu) = freq->new; + } return 0; } diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c new file mode 100644 index 0000000..5467879 --- /dev/null +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * EXYNOS - CPU frequency scaling support for EXYNOS series + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/regulator/consumer.h> +#include <linux/cpufreq.h> +#include <linux/suspend.h> + +#include <mach/cpufreq.h> + +#include <plat/cpu.h> + +static struct exynos_dvfs_info *exynos_info; + +static struct regulator *arm_regulator; +static struct cpufreq_freqs freqs; + +static unsigned int locking_frequency; +static bool frequency_locked; +static DEFINE_MUTEX(cpufreq_lock); + +int exynos_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, + exynos_info->freq_table); +} + +unsigned int exynos_getspeed(unsigned int cpu) +{ + return clk_get_rate(exynos_info->cpu_clk) / 1000; +} + +static int exynos_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int index, old_index; + unsigned int arm_volt, safe_arm_volt = 0; + int ret = 0; + struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; + unsigned int *volt_table = exynos_info->volt_table; + unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; + + mutex_lock(&cpufreq_lock); + + freqs.old = policy->cur; + + if (frequency_locked && target_freq != locking_frequency) { + ret = -EAGAIN; + goto out; + } + + if (cpufreq_frequency_table_target(policy, freq_table, + freqs.old, relation, &old_index)) { + ret = -EINVAL; + goto out; + } + + if (cpufreq_frequency_table_target(policy, freq_table, + target_freq, relation, &index)) { + ret = -EINVAL; + goto out; + } + + freqs.new = freq_table[index].frequency; + freqs.cpu = policy->cpu; + + /* + * ARM clock source will be changed APLL to MPLL temporary + * To support this level, need to control regulator for + * required voltage level + */ + if (exynos_info->need_apll_change != NULL) { + if (exynos_info->need_apll_change(old_index, index) && + (freq_table[index].frequency < mpll_freq_khz) && + (freq_table[old_index].frequency < mpll_freq_khz)) + safe_arm_volt = volt_table[exynos_info->pll_safe_idx]; + } + arm_volt = volt_table[index]; + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* When the new frequency is higher than current frequency */ + if ((freqs.new > freqs.old) && !safe_arm_volt) { + /* Firstly, voltage up to increase frequency */ + regulator_set_voltage(arm_regulator, arm_volt, + arm_volt); + } + + if (safe_arm_volt) + regulator_set_voltage(arm_regulator, safe_arm_volt, + safe_arm_volt); + if (freqs.new != freqs.old) + exynos_info->set_freq(old_index, index); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + /* When the new frequency is lower than current frequency */ + if ((freqs.new < freqs.old) || + ((freqs.new > freqs.old) && safe_arm_volt)) { + /* down the voltage after frequency change */ + regulator_set_voltage(arm_regulator, arm_volt, + arm_volt); + } + +out: + mutex_unlock(&cpufreq_lock); + + return ret; +} + +#ifdef CONFIG_PM +static int exynos_cpufreq_suspend(struct cpufreq_policy *policy) +{ + return 0; +} + +static int exynos_cpufreq_resume(struct cpufreq_policy *policy) +{ + return 0; +} +#endif + +/** + * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume + * context + * @notifier + * @pm_event + * @v + * + 
* While frequency_locked == true, target() ignores every frequency but + * locking_frequency. The locking_frequency value is the initial frequency, + * which is set by the bootloader. In order to eliminate possible + * inconsistency in clock values, we save and restore frequencies during + * suspend and resume and block CPUFREQ activities. Note that the standard + * suspend/resume cannot be used as they are too deep (syscore_ops) for + * regulator actions. + */ +static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier, + unsigned long pm_event, void *v) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ + static unsigned int saved_frequency; + unsigned int temp; + + mutex_lock(&cpufreq_lock); + switch (pm_event) { + case PM_SUSPEND_PREPARE: + if (frequency_locked) + goto out; + + frequency_locked = true; + + if (locking_frequency) { + saved_frequency = exynos_getspeed(0); + + mutex_unlock(&cpufreq_lock); + exynos_target(policy, locking_frequency, + CPUFREQ_RELATION_H); + mutex_lock(&cpufreq_lock); + } + break; + + case PM_POST_SUSPEND: + if (saved_frequency) { + /* + * While frequency_locked, only locking_frequency + * is valid for target(). In order to use + * saved_frequency while keeping frequency_locked, + * we temporarly overwrite locking_frequency. + */ + temp = locking_frequency; + locking_frequency = saved_frequency; + + mutex_unlock(&cpufreq_lock); + exynos_target(policy, locking_frequency, + CPUFREQ_RELATION_H); + mutex_lock(&cpufreq_lock); + + locking_frequency = temp; + } + frequency_locked = false; + break; + } +out: + mutex_unlock(&cpufreq_lock); + + return NOTIFY_OK; +} + +static struct notifier_block exynos_cpufreq_nb = { + .notifier_call = exynos_cpufreq_pm_notifier, +}; + +static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu); + + cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); + + /* set the transition latency value */ + policy->cpuinfo.transition_latency = 100000; + + /* + * EXYNOS4 multi-core processors has 2 cores + * that the frequency cannot be set independently. + * Each cpu is bound to the same speed. + * So the affected cpu is all of the cpus. 
+ */ + if (num_online_cpus() == 1) { + cpumask_copy(policy->related_cpus, cpu_possible_mask); + cpumask_copy(policy->cpus, cpu_online_mask); + } else { + cpumask_setall(policy->cpus); + } + + return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); +} + +static struct cpufreq_driver exynos_driver = { + .flags = CPUFREQ_STICKY, + .verify = exynos_verify_speed, + .target = exynos_target, + .get = exynos_getspeed, + .init = exynos_cpufreq_cpu_init, + .name = "exynos_cpufreq", +#ifdef CONFIG_PM + .suspend = exynos_cpufreq_suspend, + .resume = exynos_cpufreq_resume, +#endif +}; + +static int __init exynos_cpufreq_init(void) +{ + int ret = -EINVAL; + + exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL); + if (!exynos_info) + return -ENOMEM; + + if (soc_is_exynos4210()) + ret = exynos4210_cpufreq_init(exynos_info); + else + pr_err("%s: CPU type not found\n", __func__); + + if (ret) + goto err_vdd_arm; + + if (exynos_info->set_freq == NULL) { + pr_err("%s: No set_freq function (ERR)\n", __func__); + goto err_vdd_arm; + } + + arm_regulator = regulator_get(NULL, "vdd_arm"); + if (IS_ERR(arm_regulator)) { + pr_err("%s: failed to get resource vdd_arm\n", __func__); + goto err_vdd_arm; + } + + register_pm_notifier(&exynos_cpufreq_nb); + + if (cpufreq_register_driver(&exynos_driver)) { + pr_err("%s: failed to register cpufreq driver\n", __func__); + goto err_cpufreq; + } + + return 0; +err_cpufreq: + unregister_pm_notifier(&exynos_cpufreq_nb); + + if (!IS_ERR(arm_regulator)) + regulator_put(arm_regulator); +err_vdd_arm: + kfree(exynos_info); + pr_debug("%s: failed initialization\n", __func__); + return -EINVAL; +} +late_initcall(exynos_cpufreq_init); diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index ab9741f..065da5b 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -2,61 +2,52 @@ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * EXYNOS4 - CPU frequency scaling support + * EXYNOS4210 - CPU frequency scaling support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ -#include <linux/types.h> +#include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> -#include <linux/regulator/consumer.h> #include <linux/cpufreq.h> -#include <linux/notifier.h> -#include <linux/suspend.h> -#include <mach/map.h> #include <mach/regs-clock.h> -#include <mach/regs-mem.h> +#include <mach/cpufreq.h> -#include <plat/clock.h> -#include <plat/pm.h> +#define CPUFREQ_LEVEL_END L5 + +static int max_support_idx = L0; +static int min_support_idx = (CPUFREQ_LEVEL_END - 1); static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; -static struct regulator *arm_regulator; -static struct regulator *int_regulator; - -static struct cpufreq_freqs freqs; -static unsigned int memtype; - -static unsigned int locking_frequency; -static bool frequency_locked; -static DEFINE_MUTEX(cpufreq_lock); - -enum exynos4_memory_type { - DDR2 = 4, - LPDDR2, - DDR3, +struct cpufreq_clkdiv { + unsigned int index; + unsigned int clkdiv; }; -enum cpufreq_level_index { - L0, L1, L2, L3, CPUFREQ_LEVEL_END, +static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = { + 1250000, 1150000, 1050000, 975000, 950000, }; -static struct cpufreq_frequency_table exynos4_freq_table[] = { - {L0, 1000*1000}, - {L1, 800*1000}, - {L2, 400*1000}, - {L3, 100*1000}, + +static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END]; + +static struct cpufreq_frequency_table exynos4210_freq_table[] = { + {L0, 1200*1000}, + {L1, 1000*1000}, + {L2, 800*1000}, + {L3, 500*1000}, + {L4, 200*1000}, {0, CPUFREQ_TABLE_END}, }; @@ -67,17 +58,20 @@ static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = { * DIVATB, DIVPCLK_DBG, DIVAPLL } */ - /* ARM L0: 1000MHz */ - { 0, 3, 7, 3, 3, 0, 1 }, + /* ARM L0: 1200MHz */ + { 0, 3, 7, 3, 4, 1, 7 }, - /* ARM L1: 800MHz */ - { 0, 3, 7, 3, 3, 0, 1 }, + /* ARM L1: 1000MHz */ + { 0, 3, 7, 3, 4, 1, 7 }, - /* ARM L2: 400MHz */ - { 0, 1, 3, 1, 3, 0, 1 }, + /* ARM L2: 800MHz */ + { 0, 3, 7, 3, 3, 1, 7 }, - /* ARM L3: 100MHz */ - { 0, 0, 1, 0, 3, 1, 1 }, + /* ARM L3: 500MHz */ + { 0, 3, 7, 3, 3, 1, 7 }, + + /* ARM L4: 200MHz */ + { 0, 1, 3, 1, 3, 1, 0 }, }; static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = { @@ -86,147 +80,46 @@ static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = { * { DIVCOPY, DIVHPM } */ - /* ARM L0: 1000MHz */ - { 3, 0 }, + /* ARM L0: 1200MHz */ + { 5, 0 }, - /* ARM L1: 800MHz */ - { 3, 0 }, + /* ARM L1: 1000MHz */ + { 4, 0 }, - /* ARM L2: 400MHz */ + /* ARM L2: 800MHz */ { 3, 0 }, - /* ARM L3: 100MHz */ + /* ARM L3: 500MHz */ { 3, 0 }, -}; - -static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = { - /* - * Clock divider value for following - * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD - * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS } - */ - - /* DMC L0: 400MHz */ - { 3, 1, 1, 1, 1, 1, 3, 1 }, - - /* DMC L1: 400MHz */ - { 3, 1, 1, 1, 1, 1, 3, 1 }, - - /* DMC L2: 266.7MHz */ - { 7, 1, 1, 2, 1, 1, 3, 1 }, - - /* DMC L3: 200MHz */ - { 7, 1, 1, 3, 1, 1, 3, 1 }, -}; - -static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = { - /* - * Clock divider value for following - * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND } - */ - /* ACLK200 L0: 200MHz */ - { 3, 7, 4, 5, 1 }, - - /* ACLK200 L1: 200MHz */ - { 3, 7, 4, 5, 1 }, - - /* ACLK200 L2: 160MHz */ - { 4, 7, 5, 7, 1 }, - - /* ACLK200 L3: 133.3MHz */ - { 5, 7, 7, 7, 1 }, -}; - -static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = { - /* - * Clock divider value 
for following - * { DIVGDL/R, DIVGPL/R } - */ - - /* ACLK_GDL/R L0: 200MHz */ - { 3, 1 }, - - /* ACLK_GDL/R L1: 200MHz */ - { 3, 1 }, - - /* ACLK_GDL/R L2: 160MHz */ - { 4, 1 }, - - /* ACLK_GDL/R L3: 133.3MHz */ - { 5, 1 }, -}; - -struct cpufreq_voltage_table { - unsigned int index; /* any */ - unsigned int arm_volt; /* uV */ - unsigned int int_volt; + /* ARM L4: 200MHz */ + { 3, 0 }, }; -static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = { - { - .index = L0, - .arm_volt = 1200000, - .int_volt = 1100000, - }, { - .index = L1, - .arm_volt = 1100000, - .int_volt = 1100000, - }, { - .index = L2, - .arm_volt = 1000000, - .int_volt = 1000000, - }, { - .index = L3, - .arm_volt = 900000, - .int_volt = 1000000, - }, -}; +static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = { + /* APLL FOUT L0: 1200MHz */ + ((150 << 16) | (3 << 8) | 1), -static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = { - /* APLL FOUT L0: 1000MHz */ + /* APLL FOUT L1: 1000MHz */ ((250 << 16) | (6 << 8) | 1), - /* APLL FOUT L1: 800MHz */ + /* APLL FOUT L2: 800MHz */ ((200 << 16) | (6 << 8) | 1), - /* APLL FOUT L2 : 400MHz */ - ((200 << 16) | (6 << 8) | 2), + /* APLL FOUT L3: 500MHz */ + ((250 << 16) | (6 << 8) | 2), - /* APLL FOUT L3: 100MHz */ - ((200 << 16) | (6 << 8) | 4), + /* APLL FOUT L4: 200MHz */ + ((200 << 16) | (6 << 8) | 3), }; -static int exynos4_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, exynos4_freq_table); -} - -static unsigned int exynos4_getspeed(unsigned int cpu) -{ - return clk_get_rate(cpu_clk) / 1000; -} - -static void exynos4_set_clkdiv(unsigned int div_index) +static void exynos4210_set_clkdiv(unsigned int div_index) { unsigned int tmp; /* Change Divider - CPU0 */ - tmp = __raw_readl(S5P_CLKDIV_CPU); - - tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK | - S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK | - S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK | - S5P_CLKDIV_CPU0_APLL_MASK); - - tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) | - (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) | - (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) | - (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) | - (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) | - (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) | - (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT)); + tmp = exynos4210_clkdiv_table[div_index].clkdiv; __raw_writel(tmp, S5P_CLKDIV_CPU); @@ -248,83 +141,9 @@ static void exynos4_set_clkdiv(unsigned int div_index) do { tmp = __raw_readl(S5P_CLKDIV_STATCPU1); } while (tmp & 0x11); - - /* Change Divider - DMC0 */ - - tmp = __raw_readl(S5P_CLKDIV_DMC0); - - tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK | - S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK | - S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK | - S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK); - - tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) | - (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) | - (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) | - (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) | - (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) | - (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) | - (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) | - (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT)); - - 
__raw_writel(tmp, S5P_CLKDIV_DMC0); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0); - } while (tmp & 0x11111111); - - /* Change Divider - TOP */ - - tmp = __raw_readl(S5P_CLKDIV_TOP); - - tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK | - S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK | - S5P_CLKDIV_TOP_ONENAND_MASK); - - tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) | - (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) | - (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) | - (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) | - (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_TOP); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_TOP); - } while (tmp & 0x11111); - - /* Change Divider - LEFTBUS */ - - tmp = __raw_readl(S5P_CLKDIV_LEFTBUS); - - tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK); - - tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) | - (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_LEFTBUS); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS); - } while (tmp & 0x11); - - /* Change Divider - RIGHTBUS */ - - tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS); - - tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK); - - tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) | - (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS); - } while (tmp & 0x11); } -static void exynos4_set_apll(unsigned int index) +static void exynos4210_set_apll(unsigned int index) { unsigned int tmp; @@ -343,7 +162,7 @@ static void exynos4_set_apll(unsigned int index) /* 3. Change PLL PMS values */ tmp = __raw_readl(S5P_APLL_CON0); tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); - tmp |= exynos4_apll_pms_table[index]; + tmp |= exynos4210_apll_pms_table[index]; __raw_writel(tmp, S5P_APLL_CON0); /* 4. wait_lock_time */ @@ -360,328 +179,126 @@ static void exynos4_set_apll(unsigned int index) } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT)); } -static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index) +bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index) +{ + unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8); + unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8); + + return (old_pm == new_pm) ? 0 : 1; +} + +static void exynos4210_set_frequency(unsigned int old_index, + unsigned int new_index) { unsigned int tmp; if (old_index > new_index) { - /* The frequency changing to L0 needs to change apll */ - if (freqs.new == exynos4_freq_table[L0].frequency) { - /* 1. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); - - /* 2. Change the apll m,p,s value */ - exynos4_set_apll(new_index); - } else { + if (!exynos4210_pms_change(old_index, new_index)) { /* 1. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); + exynos4210_set_clkdiv(new_index); /* 2. Change just s value in apll m,p,s value */ tmp = __raw_readl(S5P_APLL_CON0); tmp &= ~(0x7 << 0); - tmp |= (exynos4_apll_pms_table[new_index] & 0x7); + tmp |= (exynos4210_apll_pms_table[new_index] & 0x7); __raw_writel(tmp, S5P_APLL_CON0); - } - } - - else if (old_index < new_index) { - /* The frequency changing from L0 needs to change apll */ - if (freqs.old == exynos4_freq_table[L0].frequency) { - /* 1. 
Change the apll m,p,s value */ - exynos4_set_apll(new_index); - - /* 2. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); } else { + /* Clock Configuration Procedure */ + /* 1. Change the system clock divider values */ + exynos4210_set_clkdiv(new_index); + /* 2. Change the apll m,p,s value */ + exynos4210_set_apll(new_index); + } + } else if (old_index < new_index) { + if (!exynos4210_pms_change(old_index, new_index)) { /* 1. Change just s value in apll m,p,s value */ tmp = __raw_readl(S5P_APLL_CON0); tmp &= ~(0x7 << 0); - tmp |= (exynos4_apll_pms_table[new_index] & 0x7); + tmp |= (exynos4210_apll_pms_table[new_index] & 0x7); __raw_writel(tmp, S5P_APLL_CON0); /* 2. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); + exynos4210_set_clkdiv(new_index); + } else { + /* Clock Configuration Procedure */ + /* 1. Change the apll m,p,s value */ + exynos4210_set_apll(new_index); + /* 2. Change the system clock divider values */ + exynos4210_set_clkdiv(new_index); } } } -static int exynos4_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int index, old_index; - unsigned int arm_volt, int_volt; - int err = -EINVAL; - - freqs.old = exynos4_getspeed(policy->cpu); - - mutex_lock(&cpufreq_lock); - - if (frequency_locked && target_freq != locking_frequency) { - err = -EAGAIN; - goto out; - } - - if (cpufreq_frequency_table_target(policy, exynos4_freq_table, - freqs.old, relation, &old_index)) - goto out; - - if (cpufreq_frequency_table_target(policy, exynos4_freq_table, - target_freq, relation, &index)) - goto out; - - err = 0; - - freqs.new = exynos4_freq_table[index].frequency; - freqs.cpu = policy->cpu; - - if (freqs.new == freqs.old) - goto out; - - /* get the voltage value */ - arm_volt = exynos4_volt_table[index].arm_volt; - int_volt = exynos4_volt_table[index].int_volt; - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - /* control regulator */ - if (freqs.new > freqs.old) { - /* Voltage up */ - regulator_set_voltage(arm_regulator, arm_volt, arm_volt); - regulator_set_voltage(int_regulator, int_volt, int_volt); - } - - /* Clock Configuration Procedure */ - exynos4_set_frequency(old_index, index); - - /* control regulator */ - if (freqs.new < freqs.old) { - /* Voltage down */ - regulator_set_voltage(arm_regulator, arm_volt, arm_volt); - regulator_set_voltage(int_regulator, int_volt, int_volt); - } - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - -out: - mutex_unlock(&cpufreq_lock); - return err; -} - -#ifdef CONFIG_PM -/* - * These suspend/resume are used as syscore_ops, it is already too - * late to set regulator voltages at this stage. - */ -static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy) -{ - return 0; -} - -static int exynos4_cpufreq_resume(struct cpufreq_policy *policy) +int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) { - return 0; -} -#endif - -/** - * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume - * context - * @notifier - * @pm_event - * @v - * - * While frequency_locked == true, target() ignores every frequency but - * locking_frequency. The locking_frequency value is the initial frequency, - * which is set by the bootloader. In order to eliminate possible - * inconsistency in clock values, we save and restore frequencies during - * suspend and resume and block CPUFREQ activities. Note that the standard - * suspend/resume cannot be used as they are too deep (syscore_ops) for - * regulator actions. 
- */ -static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier, - unsigned long pm_event, void *v) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ - static unsigned int saved_frequency; - unsigned int temp; - - mutex_lock(&cpufreq_lock); - switch (pm_event) { - case PM_SUSPEND_PREPARE: - if (frequency_locked) - goto out; - frequency_locked = true; - - if (locking_frequency) { - saved_frequency = exynos4_getspeed(0); - - mutex_unlock(&cpufreq_lock); - exynos4_target(policy, locking_frequency, - CPUFREQ_RELATION_H); - mutex_lock(&cpufreq_lock); - } - - break; - case PM_POST_SUSPEND: - - if (saved_frequency) { - /* - * While frequency_locked, only locking_frequency - * is valid for target(). In order to use - * saved_frequency while keeping frequency_locked, - * we temporarly overwrite locking_frequency. - */ - temp = locking_frequency; - locking_frequency = saved_frequency; - - mutex_unlock(&cpufreq_lock); - exynos4_target(policy, locking_frequency, - CPUFREQ_RELATION_H); - mutex_lock(&cpufreq_lock); - - locking_frequency = temp; - } - - frequency_locked = false; - break; - } -out: - mutex_unlock(&cpufreq_lock); - - return NOTIFY_OK; -} - -static struct notifier_block exynos4_cpufreq_nb = { - .notifier_call = exynos4_cpufreq_pm_notifier, -}; - -static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - int ret; - - policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu); - - cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu); - - /* set the transition latency value */ - policy->cpuinfo.transition_latency = 100000; - - /* - * EXYNOS4 multi-core processors has 2 cores - * that the frequency cannot be set independently. - * Each cpu is bound to the same speed. - * So the affected cpu is all of the cpus. 
- */ - cpumask_setall(policy->cpus); - - ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table); - if (ret) - return ret; - - cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu); - - return 0; -} - -static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *exynos4_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver exynos4_driver = { - .flags = CPUFREQ_STICKY, - .verify = exynos4_verify_speed, - .target = exynos4_target, - .get = exynos4_getspeed, - .init = exynos4_cpufreq_cpu_init, - .exit = exynos4_cpufreq_cpu_exit, - .name = "exynos4_cpufreq", - .attr = exynos4_cpufreq_attr, -#ifdef CONFIG_PM - .suspend = exynos4_cpufreq_suspend, - .resume = exynos4_cpufreq_resume, -#endif -}; + int i; + unsigned int tmp; + unsigned long rate; -static int __init exynos4_cpufreq_init(void) -{ cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); - locking_frequency = exynos4_getspeed(0); - moutcore = clk_get(NULL, "moutcore"); if (IS_ERR(moutcore)) - goto out; + goto err_moutcore; mout_mpll = clk_get(NULL, "mout_mpll"); if (IS_ERR(mout_mpll)) - goto out; + goto err_mout_mpll; + + rate = clk_get_rate(mout_mpll) / 1000; mout_apll = clk_get(NULL, "mout_apll"); if (IS_ERR(mout_apll)) - goto out; + goto err_mout_apll; - arm_regulator = regulator_get(NULL, "vdd_arm"); - if (IS_ERR(arm_regulator)) { - printk(KERN_ERR "failed to get resource %s\n", "vdd_arm"); - goto out; - } + tmp = __raw_readl(S5P_CLKDIV_CPU); - int_regulator = regulator_get(NULL, "vdd_int"); - if (IS_ERR(int_regulator)) { - printk(KERN_ERR "failed to get resource %s\n", "vdd_int"); - goto out; + for (i = L0; i < CPUFREQ_LEVEL_END; i++) { + tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | + S5P_CLKDIV_CPU0_COREM0_MASK | + S5P_CLKDIV_CPU0_COREM1_MASK | + S5P_CLKDIV_CPU0_PERIPH_MASK | + S5P_CLKDIV_CPU0_ATB_MASK | + S5P_CLKDIV_CPU0_PCLKDBG_MASK | + S5P_CLKDIV_CPU0_APLL_MASK); + + tmp |= ((clkdiv_cpu0[i][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) | + (clkdiv_cpu0[i][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) | + (clkdiv_cpu0[i][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) | + (clkdiv_cpu0[i][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) | + (clkdiv_cpu0[i][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) | + (clkdiv_cpu0[i][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) | + (clkdiv_cpu0[i][6] << S5P_CLKDIV_CPU0_APLL_SHIFT)); + + exynos4210_clkdiv_table[i].clkdiv = tmp; } - /* - * Check DRAM type. - * Because DVFS level is different according to DRAM type. 
- */ - memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET); - memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT); - memtype &= S5P_DMC0_MEMTYPE_MASK; - - if ((memtype < DDR2) && (memtype > DDR3)) { - printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype); - goto out; - } else { - printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype); - } - - register_pm_notifier(&exynos4_cpufreq_nb); - - return cpufreq_register_driver(&exynos4_driver); - -out: - if (!IS_ERR(cpu_clk)) - clk_put(cpu_clk); + info->mpll_freq_khz = rate; + info->pm_lock_idx = L2; + info->pll_safe_idx = L2; + info->max_support_idx = max_support_idx; + info->min_support_idx = min_support_idx; + info->cpu_clk = cpu_clk; + info->volt_table = exynos4210_volt_table; + info->freq_table = exynos4210_freq_table; + info->set_freq = exynos4210_set_frequency; + info->need_apll_change = exynos4210_pms_change; - if (!IS_ERR(moutcore)) - clk_put(moutcore); + return 0; +err_mout_apll: if (!IS_ERR(mout_mpll)) clk_put(mout_mpll); +err_mout_mpll: + if (!IS_ERR(moutcore)) + clk_put(moutcore); +err_moutcore: + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); - if (!IS_ERR(mout_apll)) - clk_put(mout_apll); - - if (!IS_ERR(arm_regulator)) - regulator_put(arm_regulator); - - if (!IS_ERR(int_regulator)) - regulator_put(int_regulator); - - printk(KERN_ERR "%s: failed initialization\n", __func__); - + pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -late_initcall(exynos4_cpufreq_init); +EXPORT_SYMBOL(exynos4210_cpufreq_init); diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c new file mode 100644 index 0000000..5d04c57 --- /dev/null +++ b/drivers/cpufreq/omap-cpufreq.c @@ -0,0 +1,274 @@ +/* + * CPU frequency scaling for OMAP using OPP information + * + * Copyright (C) 2005 Nokia Corporation + * Written by Tony Lindgren <tony@atomide.com> + * + * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King + * + * Copyright (C) 2007-2011 Texas Instruments, Inc. + * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/cpufreq.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/opp.h> +#include <linux/cpu.h> +#include <linux/module.h> + +#include <asm/system.h> +#include <asm/smp_plat.h> +#include <asm/cpu.h> + +#include <plat/clock.h> +#include <plat/omap-pm.h> +#include <plat/common.h> +#include <plat/omap_device.h> + +#include <mach/hardware.h> + +#ifdef CONFIG_SMP +struct lpj_info { + unsigned long ref; + unsigned int freq; +}; + +static DEFINE_PER_CPU(struct lpj_info, lpj_ref); +static struct lpj_info global_lpj_ref; +#endif + +static struct cpufreq_frequency_table *freq_table; +static atomic_t freq_table_users = ATOMIC_INIT(0); +static struct clk *mpu_clk; +static char *mpu_clk_name; +static struct device *mpu_dev; + +static int omap_verify_speed(struct cpufreq_policy *policy) +{ + if (!freq_table) + return -EINVAL; + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static unsigned int omap_getspeed(unsigned int cpu) +{ + unsigned long rate; + + if (cpu >= NR_CPUS) + return 0; + + rate = clk_get_rate(mpu_clk) / 1000; + return rate; +} + +static int omap_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int i; + int ret = 0; + struct cpufreq_freqs freqs; + + if (!freq_table) { + dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__, + policy->cpu); + return -EINVAL; + } + + ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, + relation, &i); + if (ret) { + dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n", + __func__, policy->cpu, target_freq, ret); + return ret; + } + freqs.new = freq_table[i].frequency; + if (!freqs.new) { + dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__, + policy->cpu, target_freq); + return -EINVAL; + } + + freqs.old = omap_getspeed(policy->cpu); + freqs.cpu = policy->cpu; + + if (freqs.old == freqs.new && policy->cur == freqs.new) + return ret; + + /* notifiers */ + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + +#ifdef CONFIG_CPU_FREQ_DEBUG + pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new); +#endif + + ret = clk_set_rate(mpu_clk, freqs.new * 1000); + freqs.new = omap_getspeed(policy->cpu); + +#ifdef CONFIG_SMP + /* + * Note that loops_per_jiffy is not updated on SMP systems in + * cpufreq driver. So, update the per-CPU loops_per_jiffy value + * on frequency transition. We need to update all dependent CPUs. 
+ */ + for_each_cpu(i, policy->cpus) { + struct lpj_info *lpj = &per_cpu(lpj_ref, i); + if (!lpj->freq) { + lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy; + lpj->freq = freqs.old; + } + + per_cpu(cpu_data, i).loops_per_jiffy = + cpufreq_scale(lpj->ref, lpj->freq, freqs.new); + } + + /* And don't forget to adjust the global one */ + if (!global_lpj_ref.freq) { + global_lpj_ref.ref = loops_per_jiffy; + global_lpj_ref.freq = freqs.old; + } + loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq, + freqs.new); +#endif + + /* notifiers */ + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + + return ret; +} + +static inline void freq_table_free(void) +{ + if (atomic_dec_and_test(&freq_table_users)) + opp_free_cpufreq_table(mpu_dev, &freq_table); +} + +static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) +{ + int result = 0; + + mpu_clk = clk_get(NULL, mpu_clk_name); + if (IS_ERR(mpu_clk)) + return PTR_ERR(mpu_clk); + + if (policy->cpu >= NR_CPUS) { + result = -EINVAL; + goto fail_ck; + } + + policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); + + if (atomic_inc_return(&freq_table_users) == 1) + result = opp_init_cpufreq_table(mpu_dev, &freq_table); + + if (result) { + dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", + __func__, policy->cpu, result); + goto fail_ck; + } + + result = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (result) + goto fail_table; + + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; + policy->cur = omap_getspeed(policy->cpu); + + /* + * On OMAP SMP configuartion, both processors share the voltage + * and clock. So both CPUs needs to be scaled together and hence + * needs software co-ordination. Use cpufreq affected_cpus + * interface to handle this scenario. Additional is_smp() check + * is to keep SMP_ON_UP build working. + */ + if (is_smp()) { + policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; + cpumask_setall(policy->cpus); + } + + /* FIXME: what's the actual transition time? 
*/ + policy->cpuinfo.transition_latency = 300 * 1000; + + return 0; + +fail_table: + freq_table_free(); +fail_ck: + clk_put(mpu_clk); + return result; +} + +static int omap_cpu_exit(struct cpufreq_policy *policy) +{ + freq_table_free(); + clk_put(mpu_clk); + return 0; +} + +static struct freq_attr *omap_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver omap_driver = { + .flags = CPUFREQ_STICKY, + .verify = omap_verify_speed, + .target = omap_target, + .get = omap_getspeed, + .init = omap_cpu_init, + .exit = omap_cpu_exit, + .name = "omap", + .attr = omap_cpufreq_attr, +}; + +static int __init omap_cpufreq_init(void) +{ + if (cpu_is_omap24xx()) + mpu_clk_name = "virt_prcm_set"; + else if (cpu_is_omap34xx()) + mpu_clk_name = "dpll1_ck"; + else if (cpu_is_omap44xx()) + mpu_clk_name = "dpll_mpu_ck"; + + if (!mpu_clk_name) { + pr_err("%s: unsupported Silicon?\n", __func__); + return -EINVAL; + } + + mpu_dev = omap_device_get_by_hwmod_name("mpu"); + if (!mpu_dev) { + pr_warning("%s: unable to get the mpu device\n", __func__); + return -EINVAL; + } + + return cpufreq_register_driver(&omap_driver); +} + +static void __exit omap_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&omap_driver); +} + +MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs"); +MODULE_LICENSE("GPL"); +module_init(omap_cpufreq_init); +module_exit(omap_cpufreq_exit); diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index bce576d..8f9b2cee 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1,10 +1,11 @@ /* - * (c) 2003-2010 Advanced Micro Devices, Inc. + * (c) 2003-2012 Advanced Micro Devices, Inc. * Your use of this code is subject to the terms and conditions of the * GNU general public license version 2. See "COPYING" or * http://www.gnu.org/licenses/gpl.html * - * Support : mark.langsdorf@amd.com + * Maintainer: + * Andreas Herrmann <andreas.herrmann3@amd.com> * * Based on the powernow-k7.c module written by Dave Jones. * (C) 2003 Dave Jones on behalf of SuSE Labs @@ -16,12 +17,14 @@ * Valuable input gratefully received from Dave Jones, Pavel Machek, * Dominik Brodowski, Jacob Shin, and others. * Originally developed by Paul Devriendt. - * Processor information obtained from Chapter 9 (Power and Thermal Management) - * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD - * Opteron Processors" available for download from www.amd.com * - * Tables for specific CPUs can be inferred from - * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf + * Processor information obtained from Chapter 9 (Power and Thermal + * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for + * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x + * Power Management" in BKDGs for newer AMD CPU families. + * + * Tables for specific CPUs can be inferred from AMD's processor + * power and thermal data sheets, (e.g. 
30417.pdf, 30430.pdf, 43375.pdf) */ #include <linux/kernel.h> @@ -54,6 +57,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); static int cpu_family = CPU_OPTERON; +/* array to map SW pstate number to acpi state */ +static u32 ps_to_as[8]; + /* core performance boost */ static bool cpb_capable, cpb_enabled; static struct msr __percpu *msrs; @@ -80,9 +86,9 @@ static u32 find_khz_freq_from_fid(u32 fid) } static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, - u32 pstate) + u32 pstate) { - return data[pstate].frequency; + return data[ps_to_as[pstate]].frequency; } /* Return the vco fid for an input fid @@ -926,23 +932,27 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, invalidate_entry(powernow_table, i); continue; } - rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); - if (!(hi & HW_PSTATE_VALID_MASK)) { - pr_debug("invalid pstate %d, ignoring\n", index); - invalidate_entry(powernow_table, i); - continue; - } - powernow_table[i].index = index; + ps_to_as[index] = i; /* Frequency may be rounded for these */ if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) || boot_cpu_data.x86 == 0x11) { + + rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); + if (!(hi & HW_PSTATE_VALID_MASK)) { + pr_debug("invalid pstate %d, ignoring\n", index); + invalidate_entry(powernow_table, i); + continue; + } + powernow_table[i].frequency = freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); } else powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; + + powernow_table[i].index = index; } return 0; } @@ -1189,7 +1199,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, powernow_k8_acpi_pst_values(data, newstate); if (cpu_family == CPU_HW_PSTATE) - ret = transition_frequency_pstate(data, newstate); + ret = transition_frequency_pstate(data, + data->powernow_table[newstate].index); else ret = transition_frequency_fidvid(data, newstate); if (ret) { @@ -1202,7 +1213,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, if (cpu_family == CPU_HW_PSTATE) pol->cur = find_khz_freq_from_pstate(data->powernow_table, - newstate); + data->powernow_table[newstate].index); else pol->cur = find_khz_freq_from_fid(data->currfid); ret = 0; diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index 3475f65..a5e72cb 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c @@ -8,6 +8,8 @@ * published by the Free Software Foundation. 
*/ +#define pr_fmt(fmt) "cpufreq: " fmt + #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> @@ -91,7 +93,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); + pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -101,7 +103,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { - pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n", + pr_err("Failed to set VDDARM for %dkHz: %d\n", freqs.new, ret); goto err; } @@ -110,7 +112,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, ret = clk_set_rate(armclk, freqs.new * 1000); if (ret < 0) { - pr_err("cpufreq: Failed to set rate %dkHz: %d\n", + pr_err("Failed to set rate %dkHz: %d\n", freqs.new, ret); goto err; } @@ -123,14 +125,14 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { - pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n", + pr_err("Failed to set VDDARM for %dkHz: %d\n", freqs.new, ret); goto err_clk; } } #endif - pr_debug("cpufreq: Set actual frequency %lukHz\n", + pr_debug("Set actual frequency %lukHz\n", clk_get_rate(armclk) / 1000); return 0; @@ -153,7 +155,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void) count = regulator_count_voltages(vddarm); if (count < 0) { - pr_err("cpufreq: Unable to check supported voltages\n"); + pr_err("Unable to check supported voltages\n"); } freq = s3c64xx_freq_table; @@ -171,7 +173,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void) } if (!found) { - pr_debug("cpufreq: %dkHz unsupported by regulator\n", + pr_debug("%dkHz unsupported by regulator\n", freq->frequency); freq->frequency = CPUFREQ_ENTRY_INVALID; } @@ -194,13 +196,13 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) return -EINVAL; if (s3c64xx_freq_table == NULL) { - pr_err("cpufreq: No frequency information for this CPU\n"); + pr_err("No frequency information for this CPU\n"); return -ENODEV; } armclk = clk_get(NULL, "armclk"); if (IS_ERR(armclk)) { - pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n", + pr_err("Unable to obtain ARMCLK: %ld\n", PTR_ERR(armclk)); return PTR_ERR(armclk); } @@ -209,12 +211,19 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) vddarm = regulator_get(NULL, "vddarm"); if (IS_ERR(vddarm)) { ret = PTR_ERR(vddarm); - pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret); - pr_err("cpufreq: Only frequency scaling available\n"); + pr_err("Failed to obtain VDDARM: %d\n", ret); + pr_err("Only frequency scaling available\n"); vddarm = NULL; } else { s3c64xx_cpufreq_config_regulator(); } + + vddint = regulator_get(NULL, "vddint"); + if (IS_ERR(vddint)) { + ret = PTR_ERR(vddint); + pr_err("Failed to obtain VDDINT: %d\n", ret); + vddint = NULL; + } #endif freq = s3c64xx_freq_table; @@ -225,7 +234,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) r = clk_round_rate(armclk, freq->frequency * 1000); r /= 1000; if (r != freq->frequency) { - pr_debug("cpufreq: %dkHz unsupported by clock\n", + pr_debug("%dkHz unsupported by clock\n", freq->frequency); freq->frequency = CPUFREQ_ENTRY_INVALID; } @@ -248,7 +257,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table); if (ret 
!= 0) { - pr_err("cpufreq: Failed to configure frequency table: %d\n", + pr_err("Failed to configure frequency table: %d\n", ret); regulator_put(vddarm); clk_put(armclk);
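
For orientation only, here is a small standalone C sketch (not part of the commit above) of the descriptor pattern that the new drivers/cpufreq/exynos-cpufreq.c relies on: the SoC-specific code fills a structure with its frequency/voltage tables and a set_freq callback, and the common driver only dispatches through that structure. All names below (toy_dvfs_info, toy_soc_init, ...) are made up for illustration; the real type is struct exynos_dvfs_info from mach/cpufreq.h, filled in by exynos4210_cpufreq_init() as shown in the diff.

/*
 * Standalone toy sketch of the common/SoC-specific split used by the
 * new EXYNOS cpufreq code.  Every name here is illustrative only.
 */
#include <stdio.h>

struct toy_dvfs_info {
	const unsigned int *freq_table;		/* kHz, 0-terminated */
	const unsigned int *volt_table;		/* uV, same indexing */
	void (*set_freq)(unsigned int old_index, unsigned int new_index);
};

/* "SoC-specific" side, analogous to exynos4210_cpufreq_init() */
static const unsigned int toy_freqs[] = { 1200000, 1000000, 800000, 500000, 200000, 0 };
static const unsigned int toy_volts[] = { 1250000, 1150000, 1050000, 975000, 950000 };

static void toy_set_freq(unsigned int old_index, unsigned int new_index)
{
	printf("reprogram dividers/PLL: L%u -> L%u\n", old_index, new_index);
}

static int toy_soc_init(struct toy_dvfs_info *info)
{
	info->freq_table = toy_freqs;
	info->volt_table = toy_volts;
	info->set_freq = toy_set_freq;
	return 0;
}

/* "Common" side, analogous to exynos_target(): pick a level, then dispatch */
static struct toy_dvfs_info toy_info;

static void toy_target(unsigned int old_index, unsigned int target_khz)
{
	unsigned int i;

	for (i = 0; toy_info.freq_table[i]; i++)
		if (toy_info.freq_table[i] <= target_khz)
			break;
	if (!toy_info.freq_table[i])
		i--;				/* clamp to the lowest level */

	printf("set voltage %u uV\n", toy_info.volt_table[i]);
	toy_info.set_freq(old_index, i);
}

int main(void)
{
	if (toy_soc_init(&toy_info))
		return 1;
	toy_target(0, 500000);			/* e.g. scale L0 down toward 500 MHz */
	return 0;
}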