Diffstat (limited to 'drivers/cpufreq/cpufreq-dt.c')
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 151
1 file changed, 100 insertions(+), 51 deletions(-)
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 6bbb8b9..9bc2720 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -18,6 +18,7 @@
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
+#include <linux/cpufreq-dt.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -57,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
old_freq = clk_get_rate(cpu_clk) / 1000;
if (!IS_ERR(cpu_reg)) {
+ unsigned long opp_freq;
+
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
@@ -66,13 +69,16 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);
+ opp_freq = dev_pm_opp_get_freq(opp);
rcu_read_unlock();
tol = volt * priv->voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
+ dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
+ opp_freq / 1000, volt);
}
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
- old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
@@ -88,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
- if (!IS_ERR(cpu_reg))
+ if (!IS_ERR(cpu_reg) && volt_old > 0)
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
return ret;
}
@@ -146,8 +152,8 @@ try_again:
goto try_again;
}
- dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n",
- cpu, PTR_ERR(cpu_reg));
+ dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
+ cpu, PTR_ERR(cpu_reg));
}
cpu_clk = clk_get(cpu_dev, NULL);
@@ -165,8 +171,8 @@ try_again:
if (ret == -EPROBE_DEFER)
dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
else
- dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
- cpu);
+ dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
+ ret);
} else {
*cdev = cpu_dev;
*creg = cpu_reg;
@@ -178,13 +184,14 @@ try_again:
static int cpufreq_init(struct cpufreq_policy *policy)
{
+ struct cpufreq_dt_platform_data *pd;
struct cpufreq_frequency_table *freq_table;
- struct thermal_cooling_device *cdev;
struct device_node *np;
struct private_data *priv;
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
+ unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
int ret;
@@ -204,16 +211,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
- ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
- if (ret) {
- dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto out_put_node;
- }
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
- goto out_free_table;
+ goto out_free_opp;
}
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -222,42 +223,49 @@ static int cpufreq_init(struct cpufreq_policy *policy)
transition_latency = CPUFREQ_ETERNAL;
if (!IS_ERR(cpu_reg)) {
- struct dev_pm_opp *opp;
- unsigned long min_uV, max_uV;
- int i;
+ unsigned long opp_freq = 0;
/*
- * OPP is maintained in order of increasing frequency, and
- * freq_table initialised from OPP is therefore sorted in the
- * same order.
+ * Disable any OPPs where the connected regulator isn't able to
+ * provide the specified voltage and record minimum and maximum
+ * voltage levels.
*/
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
- ;
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_exact(cpu_dev,
- freq_table[0].frequency * 1000, true);
- min_uV = dev_pm_opp_get_voltage(opp);
- opp = dev_pm_opp_find_freq_exact(cpu_dev,
- freq_table[i-1].frequency * 1000, true);
- max_uV = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
+ while (1) {
+ struct dev_pm_opp *opp;
+ unsigned long opp_uV, tol_uV;
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ break;
+ }
+ opp_uV = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+
+ tol_uV = opp_uV * priv->voltage_tolerance / 100;
+ if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+ opp_uV + tol_uV)) {
+ if (opp_uV < min_uV)
+ min_uV = opp_uV;
+ if (opp_uV > max_uV)
+ max_uV = opp_uV;
+ } else {
+ dev_pm_opp_disable(cpu_dev, opp_freq);
+ }
+
+ opp_freq++;
+ }
+
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
transition_latency += ret * 1000;
}
- /*
- * For now, just loading the cooling device;
- * thermal DT code takes care of matching them.
- */
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
- if (IS_ERR(cdev))
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev));
- else
- priv->cdev = cdev;
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ pr_err("failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
}
priv->cpu_dev = cpu_dev;
@@ -265,20 +273,29 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = priv;
policy->clk = cpu_clk;
- ret = cpufreq_generic_init(policy, freq_table, transition_latency);
- if (ret)
- goto out_cooling_unregister;
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+ ret);
+ goto out_free_cpufreq_table;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+
+ pd = cpufreq_get_driver_data();
+ if (!pd || !pd->independent_clocks)
+ cpumask_setall(policy->cpus);
of_node_put(np);
return 0;
-out_cooling_unregister:
- cpufreq_cooling_unregister(priv->cdev);
- kfree(priv);
-out_free_table:
+out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-out_put_node:
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ of_free_opp_table(cpu_dev);
of_node_put(np);
out_put_reg_clk:
clk_put(cpu_clk);
@@ -292,8 +309,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- cpufreq_cooling_unregister(priv->cdev);
+ if (priv->cdev)
+ cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ of_free_opp_table(priv->cpu_dev);
clk_put(policy->clk);
if (!IS_ERR(priv->cpu_reg))
regulator_put(priv->cpu_reg);
@@ -302,6 +321,33 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
+static void cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+ struct device_node *np = of_node_get(priv->cpu_dev->of_node);
+
+ if (WARN_ON(!np))
+ return;
+
+ /*
+ * For now, just loading the cooling device;
+ * thermal DT code takes care of matching them.
+ */
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ priv->cdev = of_cpufreq_cooling_register(np,
+ policy->related_cpus);
+ if (IS_ERR(priv->cdev)) {
+ dev_err(priv->cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(priv->cdev));
+
+ priv->cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+}
+
static struct cpufreq_driver dt_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
@@ -309,6 +355,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.get = cpufreq_generic_get,
.init = cpufreq_init,
.exit = cpufreq_exit,
+ .ready = cpufreq_ready,
.name = "cpufreq-dt",
.attr = cpufreq_generic_attr,
};
@@ -335,6 +382,8 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
if (!IS_ERR(cpu_reg))
regulator_put(cpu_reg);
+ dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
+
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
dev_err(cpu_dev, "failed register driver: %d\n", ret);