From 29fd2a34ef8d863e48183bd473ba57c8d7839e25 Mon Sep 17 00:00:00 2001
From: Jerome Brunet
Date: Tue, 19 Dec 2017 09:33:29 +0100
Subject: clk: check ops pointer on clock register

Nothing really prevents a provider from (trying to) register a clock
without providing the clock ops structure. We do check the individual
fields before using them, but not the structure pointer itself. This may
have the usual nasty consequences when the pointer is dereferenced, most
likely when checking one of the fields during initialization.

This is fixed by returning an error on clock register if the ops pointer
is NULL.

Signed-off-by: Jerome Brunet
Signed-off-by: Michael Turquette
Link: lkml.kernel.org/r/20171219083329.24746-1-jbrunet@baylibre.com
---
 drivers/clk/clk.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'drivers/clk/clk.c')

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 647d056..e3e98ac 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2678,7 +2678,13 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 		ret = -ENOMEM;
 		goto fail_name;
 	}
+
+	if (WARN_ON(!hw->init->ops)) {
+		ret = -EINVAL;
+		goto fail_ops;
+	}
 	core->ops = hw->init->ops;
+
 	if (dev && pm_runtime_enabled(dev))
 		core->dev = dev;
 	if (dev && dev->driver)
@@ -2740,6 +2746,7 @@ fail_parent_names_copy:
 		kfree_const(core->parent_names[i]);
 	kfree(core->parent_names);
 fail_parent_names:
+fail_ops:
 	kfree_const(core->name);
 fail_name:
 	kfree(core);
--
cgit v1.1

From 56e7ceddbeb812a6f79f37523eb4e6e4620f4416 Mon Sep 17 00:00:00 2001
From: Jerome Brunet
Date: Fri, 1 Dec 2017 22:51:51 +0100
Subject: clk: fix incorrect usage of ENOSYS

ENOSYS is special and should only be used for an incorrect syscall
number. It does not seem to be the case here.

Reported by checkpatch.pl while working on clock protection.

Acked-by: Linus Walleij
Tested-by: Quentin Schulz
Tested-by: Maxime Ripard
Acked-by: Michael Turquette
Signed-off-by: Jerome Brunet
Signed-off-by: Michael Turquette
Link: lkml.kernel.org/r/20171201215200.23523-2-jbrunet@baylibre.com
---
 drivers/clk/clk.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/clk/clk.c')

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 647d056..5fe9e63 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1888,7 +1888,7 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
 
 	/* verify ops for for multi-parent clks */
 	if ((core->num_parents > 1) && (!core->ops->set_parent)) {
-		ret = -ENOSYS;
+		ret = -EPERM;
 		goto out;
 	}
--
cgit v1.1

From 91baa9ffe6d4e8191ef55b4ece3e556294141515 Mon Sep 17 00:00:00 2001
From: Jerome Brunet
Date: Fri, 1 Dec 2017 22:51:52 +0100
Subject: clk: take the prepare lock out of clk_core_set_parent

Rework the set_parent core function so it can be called when the prepare
lock is already held by the caller.

This rework is done to ease the integration of the "protected" clock
functionality.
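For readers following the refactoring, here is a minimal sketch of the locked-wrapper/_nolock-worker split this series applies. The function names below are hypothetical, only the pattern is taken from the patches: the public entry point owns prepare_lock, while the worker asserts it with lockdep so that internal callers already holding the lock can call it directly.

/* Hypothetical names; illustrates the wrapper/_nolock split used in this series. */
static int clk_core_do_something_nolock(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);	/* callers must hold prepare_lock */

	if (!core)
		return 0;

	/* ... operate on the clock tree, no locking here ... */
	return 0;
}

int clk_do_something(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_do_something_nolock(clk->core);
	clk_prepare_unlock();

	return ret;
}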
Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-3-jbrunet@baylibre.com --- drivers/clk/clk.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 5fe9e63..e60b2a2 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1871,32 +1871,28 @@ bool clk_has_parent(struct clk *clk, struct clk *parent) } EXPORT_SYMBOL_GPL(clk_has_parent); -static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent) +static int clk_core_set_parent_nolock(struct clk_core *core, + struct clk_core *parent) { int ret = 0; int p_index = 0; unsigned long p_rate = 0; + lockdep_assert_held(&prepare_lock); + if (!core) return 0; - /* prevent racing with updates to the clock topology */ - clk_prepare_lock(); - if (core->parent == parent) - goto out; + return 0; /* verify ops for for multi-parent clks */ - if ((core->num_parents > 1) && (!core->ops->set_parent)) { - ret = -EPERM; - goto out; - } + if (core->num_parents > 1 && !core->ops->set_parent) + return -EPERM; /* check that we are allowed to re-parent if the clock is in use */ - if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { - ret = -EBUSY; - goto out; - } + if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) + return -EBUSY; /* try finding the new parent index */ if (parent) { @@ -1904,15 +1900,14 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent) if (p_index < 0) { pr_debug("%s: clk %s can not be parent of clk %s\n", __func__, parent->name, core->name); - ret = p_index; - goto out; + return p_index; } p_rate = parent->rate; } ret = clk_pm_runtime_get(core); if (ret) - goto out; + return ret; /* propagate PRE_RATE_CHANGE notifications */ ret = __clk_speculate_rates(core, p_rate); @@ -1934,8 +1929,6 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent) runtime_put: clk_pm_runtime_put(core); -out: - clk_prepare_unlock(); return ret; } @@ -1959,10 +1952,17 @@ out: */ int clk_set_parent(struct clk *clk, struct clk *parent) { + int ret; + if (!clk) return 0; - return clk_core_set_parent(clk->core, parent ? parent->core : NULL); + clk_prepare_lock(); + ret = clk_core_set_parent_nolock(clk->core, + parent ? parent->core : NULL); + clk_prepare_unlock(); + + return ret; } EXPORT_SYMBOL_GPL(clk_set_parent); @@ -2851,7 +2851,7 @@ void clk_unregister(struct clk *clk) /* Reparent all children to the orphan list. */ hlist_for_each_entry_safe(child, t, &clk->core->children, child_node) - clk_core_set_parent(child, NULL); + clk_core_set_parent_nolock(child, NULL); } hlist_del_init(&clk->core->child_node); -- cgit v1.1 From 9e4d04adeb1a957be212aace20ac5cee6f14013c Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:53 +0100 Subject: clk: add clk_core_set_phase_nolock function Create a core function for set_phase, as it is done for set_rate and set_parent. This rework is done to ease the integration of "protected" clock functionality. 
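As an aside, a hedged consumer-side sketch of clk_set_phase() after this change (the device, con_id and phase value are invented): the core still normalizes negative degrees into [0, 360) before taking prepare_lock, and clk_core_set_phase_nolock() in the diff that follows returns -EINVAL when the provider has no .set_phase callback.

/* Hypothetical consumer; the "sample" con_id is made up for illustration. */
static int foo_setup_sample_clock(struct device *dev)
{
	struct clk *sclk;
	int ret;

	sclk = devm_clk_get(dev, "sample");
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	ret = clk_set_phase(sclk, -90);		/* normalized to 270 degrees by the core */
	if (ret == -EINVAL)
		dev_warn(dev, "provider cannot adjust phase\n");

	return ret;
}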
Acked-by: Linus Walleij
Tested-by: Quentin Schulz
Tested-by: Maxime Ripard
Acked-by: Michael Turquette
Signed-off-by: Jerome Brunet
Signed-off-by: Michael Turquette
Link: lkml.kernel.org/r/20171201215200.23523-4-jbrunet@baylibre.com
---
 drivers/clk/clk.c | 33 +++++++++++++++++++++------------
 1 file changed, 21 insertions(+), 12 deletions(-)

(limited to 'drivers/clk/clk.c')

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e60b2a2..7946a06 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1966,6 +1966,25 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 }
 EXPORT_SYMBOL_GPL(clk_set_parent);
 
+static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
+{
+	int ret = -EINVAL;
+
+	lockdep_assert_held(&prepare_lock);
+
+	if (!core)
+		return 0;
+
+	trace_clk_set_phase(core, degrees);
+
+	if (core->ops->set_phase)
+		ret = core->ops->set_phase(core->hw, degrees);
+
+	trace_clk_set_phase_complete(core, degrees);
+
+	return ret;
+}
+
 /**
  * clk_set_phase - adjust the phase shift of a clock signal
  * @clk: clock signal source
@@ -1988,7 +2007,7 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
  */
 int clk_set_phase(struct clk *clk, int degrees)
 {
-	int ret = -EINVAL;
+	int ret;
 
 	if (!clk)
 		return 0;
@@ -1999,17 +2018,7 @@ int clk_set_phase(struct clk *clk, int degrees)
 		degrees += 360;
 
 	clk_prepare_lock();
-
-	trace_clk_set_phase(clk->core, degrees);
-
-	if (clk->core->ops->set_phase)
-		ret = clk->core->ops->set_phase(clk->core->hw, degrees);
-
-	trace_clk_set_phase_complete(clk->core, degrees);
-
-	if (!ret)
-		clk->core->phase = degrees;
-
+	ret = clk_core_set_phase_nolock(clk->core, degrees);
 	clk_prepare_unlock();
 
 	return ret;
--
cgit v1.1

From 0f6cc2b8e94da5400528c0ba7fd910392ec598a2 Mon Sep 17 00:00:00 2001
From: Jerome Brunet
Date: Fri, 1 Dec 2017 22:51:54 +0100
Subject: clk: rework calls to round and determine rate callbacks

Rework the way the callbacks round_rate() and determine_rate() are
called. The goal is to do this at a single point and make it easier to
add conditions before calling them.

Because of this factorization, the rate returned by determine_rate() is
also checked against the min and max rate values.

This rework is done to ease the integration of "protected" clock
functionality.
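A hypothetical provider-side sketch of what the reworked core expects (the names and the 100 Hz step are invented): a clock is considered roundable by clk_core_can_round() if it implements either .determine_rate or .round_rate, and clk_core_determine_round_nolock() prefers .determine_rate when both exist.

/* Hypothetical provider callback; a real driver would also implement .recalc_rate, etc. */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	/* example policy: this provider can only produce multiples of 100 Hz */
	return rounddown(rate, 100);
}

static const struct clk_ops foo_clk_ops = {
	.round_rate	= foo_round_rate,
	/* a .determine_rate implementation could be provided instead */
};

The 100 Hz step here matches the example scenario used in the "bail out early" patch that follows.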
Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-5-jbrunet@baylibre.com --- drivers/clk/clk.c | 82 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 52 insertions(+), 30 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 7946a06..322d9ba 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -905,10 +905,9 @@ static int clk_disable_unused(void) } late_initcall_sync(clk_disable_unused); -static int clk_core_round_rate_nolock(struct clk_core *core, - struct clk_rate_request *req) +static int clk_core_determine_round_nolock(struct clk_core *core, + struct clk_rate_request *req) { - struct clk_core *parent; long rate; lockdep_assert_held(&prepare_lock); @@ -916,15 +915,6 @@ static int clk_core_round_rate_nolock(struct clk_core *core, if (!core) return 0; - parent = core->parent; - if (parent) { - req->best_parent_hw = parent->hw; - req->best_parent_rate = parent->rate; - } else { - req->best_parent_hw = NULL; - req->best_parent_rate = 0; - } - if (core->ops->determine_rate) { return core->ops->determine_rate(core->hw, req); } else if (core->ops->round_rate) { @@ -934,15 +924,58 @@ static int clk_core_round_rate_nolock(struct clk_core *core, return rate; req->rate = rate; - } else if (core->flags & CLK_SET_RATE_PARENT) { - return clk_core_round_rate_nolock(parent, req); } else { - req->rate = core->rate; + return -EINVAL; } return 0; } +static void clk_core_init_rate_req(struct clk_core * const core, + struct clk_rate_request *req) +{ + struct clk_core *parent; + + if (WARN_ON(!core || !req)) + return; + + parent = core->parent; + if (parent) { + req->best_parent_hw = parent->hw; + req->best_parent_rate = parent->rate; + } else { + req->best_parent_hw = NULL; + req->best_parent_rate = 0; + } +} + +static bool clk_core_can_round(struct clk_core * const core) +{ + if (core->ops->determine_rate || core->ops->round_rate) + return true; + + return false; +} + +static int clk_core_round_rate_nolock(struct clk_core *core, + struct clk_rate_request *req) +{ + lockdep_assert_held(&prepare_lock); + + if (!core) + return 0; + + clk_core_init_rate_req(core, req); + + if (clk_core_can_round(core)) + return clk_core_determine_round_nolock(core, req); + else if (core->flags & CLK_SET_RATE_PARENT) + return clk_core_round_rate_nolock(core->parent, req); + + req->rate = core->rate; + return 0; +} + /** * __clk_determine_rate - get the closest rate actually supported by a clock * @hw: determine the rate of this clock @@ -1432,34 +1465,23 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core, clk_core_get_boundaries(core, &min_rate, &max_rate); /* find the closest rate and parent clk/rate */ - if (core->ops->determine_rate) { + if (clk_core_can_round(core)) { struct clk_rate_request req; req.rate = rate; req.min_rate = min_rate; req.max_rate = max_rate; - if (parent) { - req.best_parent_hw = parent->hw; - req.best_parent_rate = parent->rate; - } else { - req.best_parent_hw = NULL; - req.best_parent_rate = 0; - } - ret = core->ops->determine_rate(core->hw, &req); + clk_core_init_rate_req(core, &req); + + ret = clk_core_determine_round_nolock(core, &req); if (ret < 0) return NULL; best_parent_rate = req.best_parent_rate; new_rate = req.rate; parent = req.best_parent_hw ? 
req.best_parent_hw->core : NULL; - } else if (core->ops->round_rate) { - ret = core->ops->round_rate(core->hw, rate, - &best_parent_rate); - if (ret < 0) - return NULL; - new_rate = ret; if (new_rate < min_rate || new_rate > max_rate) return NULL; } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { -- cgit v1.1 From ca5e089a32c5ffba6c5101fdabdd6dea18041c34 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:55 +0100 Subject: clk: use round rate to bail out early in set_rate The current implementation of clk_core_set_rate_nolock() bails out early if the requested rate is exactly the same as the one set. It should bail out if the request would not result in a rate a change. This is important when the rate is not exactly what is requested, which is fairly common with PLLs. Ex: provider able to give any rate with steps of 100Hz - 1st consumer request 48000Hz and gets it. - 2nd consumer request 48010Hz as well. If we were to perform the usual mechanism, we would get 48000Hz as well. The clock would not change so there is no point performing any checks to make sure the clock can change, we know it won't. This is important to prepare the addition of the clock protection mechanism Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-6-jbrunet@baylibre.com --- drivers/clk/clk.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 322d9ba..bbe90ba 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1658,16 +1658,37 @@ static void clk_change_rate(struct clk_core *core) clk_change_rate(core->new_child); } +static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, + unsigned long req_rate) +{ + int ret; + struct clk_rate_request req; + + lockdep_assert_held(&prepare_lock); + + if (!core) + return 0; + + clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); + req.rate = req_rate; + + ret = clk_core_round_rate_nolock(core, &req); + + return ret ? 0 : req.rate; +} + static int clk_core_set_rate_nolock(struct clk_core *core, unsigned long req_rate) { struct clk_core *top, *fail_clk; - unsigned long rate = req_rate; + unsigned long rate; int ret = 0; if (!core) return 0; + rate = clk_core_req_round_rate_nolock(core, req_rate); + /* bail early if nothing to do */ if (rate == clk_core_get_rate_nolock(core)) return 0; @@ -1676,7 +1697,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core, return -EBUSY; /* calculate new rates and get the topmost changed clock */ - top = clk_calc_new_rates(core, rate); + top = clk_calc_new_rates(core, req_rate); if (!top) return -EINVAL; -- cgit v1.1 From e55a839a7a1c561b7d2fbd9cc50b7d40dd2b3361 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:56 +0100 Subject: clk: add clock protection mechanism to clk core The patch adds clk_core_protect and clk_core_unprotect to the internal CCF API. These functions allow to set a new constraint along the clock tree to prevent any change, even indirect, which may result in rate change or glitch. 
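A worked example of how the protect count behaves (the clock names are invented; the behaviour follows clk_core_rate_protect() and clk_core_rate_unprotect() in the diff below). The parent chain is only walked on a child's 0 -> 1 transition, so an ancestor is counted once per protected child rather than once per request:

	                         pll.protect_count   div.protect_count
	initial state                     0                  0
	protect(div)                      1                  1   (0 -> 1 propagates up)
	protect(div) again                1                  2   (parent left untouched)
	unprotect(div)                    1                  1
	unprotect(div) again              0                  0   (last count releases the chain)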
Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-7-jbrunet@baylibre.com --- drivers/clk/clk.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 112 insertions(+), 7 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index bbe90ba..f69a217 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -62,6 +62,7 @@ struct clk_core { bool orphan; unsigned int enable_count; unsigned int prepare_count; + unsigned int protect_count; unsigned long min_rate; unsigned long max_rate; unsigned long accuracy; @@ -170,6 +171,11 @@ static void clk_enable_unlock(unsigned long flags) spin_unlock_irqrestore(&enable_lock, flags); } +static bool clk_core_rate_is_protected(struct clk_core *core) +{ + return core->protect_count; +} + static bool clk_core_is_prepared(struct clk_core *core) { bool ret = false; @@ -381,6 +387,11 @@ bool clk_hw_is_prepared(const struct clk_hw *hw) return clk_core_is_prepared(hw->core); } +bool clk_hw_rate_is_protected(const struct clk_hw *hw) +{ + return clk_core_rate_is_protected(hw->core); +} + bool clk_hw_is_enabled(const struct clk_hw *hw) { return clk_core_is_enabled(hw->core); @@ -519,6 +530,68 @@ EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); /*** clk api ***/ +static void clk_core_rate_unprotect(struct clk_core *core) +{ + lockdep_assert_held(&prepare_lock); + + if (!core) + return; + + if (WARN_ON(core->protect_count == 0)) + return; + + if (--core->protect_count > 0) + return; + + clk_core_rate_unprotect(core->parent); +} + +static int clk_core_rate_nuke_protect(struct clk_core *core) +{ + int ret; + + lockdep_assert_held(&prepare_lock); + + if (!core) + return -EINVAL; + + if (core->protect_count == 0) + return 0; + + ret = core->protect_count; + core->protect_count = 1; + clk_core_rate_unprotect(core); + + return ret; +} + +static void clk_core_rate_protect(struct clk_core *core) +{ + lockdep_assert_held(&prepare_lock); + + if (!core) + return; + + if (core->protect_count == 0) + clk_core_rate_protect(core->parent); + + core->protect_count++; +} + +static void clk_core_rate_restore_protect(struct clk_core *core, int count) +{ + lockdep_assert_held(&prepare_lock); + + if (!core) + return; + + if (count == 0) + return; + + clk_core_rate_protect(core); + core->protect_count = count; +} + static void clk_core_unprepare(struct clk_core *core) { lockdep_assert_held(&prepare_lock); @@ -915,7 +988,9 @@ static int clk_core_determine_round_nolock(struct clk_core *core, if (!core) return 0; - if (core->ops->determine_rate) { + if (clk_core_rate_is_protected(core)) { + req->rate = core->rate; + } else if (core->ops->determine_rate) { return core->ops->determine_rate(core->hw, req); } else if (core->ops->round_rate) { rate = core->ops->round_rate(core->hw, req->rate, @@ -1661,7 +1736,7 @@ static void clk_change_rate(struct clk_core *core) static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, unsigned long req_rate) { - int ret; + int ret, cnt; struct clk_rate_request req; lockdep_assert_held(&prepare_lock); @@ -1669,11 +1744,19 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, if (!core) return 0; + /* simulate what the rate would be if it could be freely set */ + cnt = clk_core_rate_nuke_protect(core); + if (cnt < 0) + return cnt; + clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); req.rate = req_rate; ret = 
clk_core_round_rate_nolock(core, &req); + /* restore the protection */ + clk_core_rate_restore_protect(core, cnt); + return ret ? 0 : req.rate; } @@ -1693,6 +1776,10 @@ static int clk_core_set_rate_nolock(struct clk_core *core, if (rate == clk_core_get_rate_nolock(core)) return 0; + /* fail on a direct rate set of a protected provider */ + if (clk_core_rate_is_protected(core)) + return -EBUSY; + if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count) return -EBUSY; @@ -1937,6 +2024,9 @@ static int clk_core_set_parent_nolock(struct clk_core *core, if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) return -EBUSY; + if (clk_core_rate_is_protected(core)) + return -EBUSY; + /* try finding the new parent index */ if (parent) { p_index = clk_fetch_parent_index(core, parent); @@ -2018,6 +2108,9 @@ static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) if (!core) return 0; + if (clk_core_rate_is_protected(core)) + return -EBUSY; + trace_clk_set_phase(core, degrees); if (core->ops->set_phase) @@ -2148,11 +2241,12 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, if (!c) return; - seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", + seq_printf(s, "%*s%-*s %11d %12d %12d %11lu %10lu %-3d\n", level * 3 + 1, "", 30 - level * 3, c->name, - c->enable_count, c->prepare_count, clk_core_get_rate(c), - clk_core_get_accuracy(c), clk_core_get_phase(c)); + c->enable_count, c->prepare_count, c->protect_count, + clk_core_get_rate(c), clk_core_get_accuracy(c), + clk_core_get_phase(c)); } static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, @@ -2174,8 +2268,8 @@ static int clk_summary_show(struct seq_file *s, void *data) struct clk_core *c; struct hlist_head **lists = (struct hlist_head **)s->private; - seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); - seq_puts(s, "----------------------------------------------------------------------------------------\n"); + seq_puts(s, " clock enable_cnt prepare_cnt protect_cnt rate accuracy phase\n"); + seq_puts(s, "----------------------------------------------------------------------------------------------------\n"); clk_prepare_lock(); @@ -2210,6 +2304,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) seq_printf(s, "\"%s\": { ", c->name); seq_printf(s, "\"enable_count\": %d,", c->enable_count); seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); + seq_printf(s, "\"protect_count\": %d,", c->protect_count); seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); @@ -2340,6 +2435,11 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) if (!d) goto err_out; + d = debugfs_create_u32("clk_protect_count", S_IRUGO, core->dentry, + (u32 *)&core->protect_count); + if (!d) + goto err_out; + d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, (u32 *)&core->notifier_count); if (!d) @@ -2911,6 +3011,11 @@ void clk_unregister(struct clk *clk) if (clk->core->prepare_count) pr_warn("%s: unregistering prepared clock: %s\n", __func__, clk->core->name); + + if (clk->core->protect_count) + pr_warn("%s: unregistering protected clock: %s\n", + __func__, clk->core->name); + kref_put(&clk->core->ref, __clk_release); unlock: clk_prepare_unlock(); -- cgit v1.1 From c5ce26edb4fec178232b9cb37f334ec574931514 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:57 +0100 Subject: 
clk: cosmetic changes to clk_summary debugfs entry

The clk_summary debugfs entry was already well over the traditional 80
characters per line limit, but it grew even larger with the addition of
clock protection.

   clock          enable_cnt  prepare_cnt  protect_cnt        rate   accuracy   phase
----------------------------------------------------------------------------------------------------
 wifi32k                   1            1            0       32768          0   0
 vcpu                      0            0            0  2016000000          0   0
 xtal                      5            5            0    24000000          0   0

This patch reduces the width a bit:

                                 enable  prepare  protect
   clock                          count    count    count        rate   accuracy   phase
----------------------------------------------------------------------------------------
 wifi32k                              1        1        0       32768          0   0
 vcpu                                 0        0        0  2016000000          0   0
 xtal                                 5        5        0    24000000          0   0

Acked-by: Linus Walleij
Tested-by: Quentin Schulz
Tested-by: Maxime Ripard
Acked-by: Michael Turquette
Signed-off-by: Jerome Brunet
Signed-off-by: Michael Turquette
Link: lkml.kernel.org/r/20171201215200.23523-8-jbrunet@baylibre.com
---
 drivers/clk/clk.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'drivers/clk/clk.c')

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f69a217..f6fe5e5 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2241,7 +2241,7 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
 	if (!c)
 		return;
 
-	seq_printf(s, "%*s%-*s %11d %12d %12d %11lu %10lu %-3d\n",
+	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
 		   level * 3 + 1, "",
 		   30 - level * 3, c->name,
 		   c->enable_count, c->prepare_count, c->protect_count,
@@ -2268,8 +2268,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
 	struct clk_core *c;
 	struct hlist_head **lists = (struct hlist_head **)s->private;
 
-	seq_puts(s, "   clock                      enable_cnt  prepare_cnt  protect_cnt        rate   accuracy   phase\n");
-	seq_puts(s, "----------------------------------------------------------------------------------------------------\n");
+	seq_puts(s, "                                 enable  prepare  protect                               \n");
+	seq_puts(s, "   clock                          count    count    count        rate   accuracy   phase\n");
+	seq_puts(s, "----------------------------------------------------------------------------------------\n");
 
 	clk_prepare_lock();
 
--
cgit v1.1

From 55e9b8b7b806ec3f9a8817e13596682a5981c19c Mon Sep 17 00:00:00 2001
From: Jerome Brunet
Date: Fri, 1 Dec 2017 22:51:59 +0100
Subject: clk: add clk_rate_exclusive api

Using clock rate protection, we can now provide a way for clock
consumers to claim exclusive control over the rate of a producer.

So far, rate change operations have been a "last write wins" affair.
This change allows drivers to explicitly protect against this behavior,
if required. (A consumer-side usage sketch of this API is included after
the last patch in this series.)
Of course, if exclusivity over a producer is claimed more than once, the rate is effectively locked as exclusivity cannot be preempted Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-10-jbrunet@baylibre.com --- drivers/clk/clk.c | 172 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 172 insertions(+) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f6fe5e5..8e728f3 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -87,6 +87,7 @@ struct clk { const char *con_id; unsigned long min_rate; unsigned long max_rate; + unsigned int exclusive_count; struct hlist_node clks_node; }; @@ -565,6 +566,45 @@ static int clk_core_rate_nuke_protect(struct clk_core *core) return ret; } +/** + * clk_rate_exclusive_put - release exclusivity over clock rate control + * @clk: the clk over which the exclusivity is released + * + * clk_rate_exclusive_put() completes a critical section during which a clock + * consumer cannot tolerate any other consumer making any operation on the + * clock which could result in a rate change or rate glitch. Exclusive clocks + * cannot have their rate changed, either directly or indirectly due to changes + * further up the parent chain of clocks. As a result, clocks up parent chain + * also get under exclusive control of the calling consumer. + * + * If exlusivity is claimed more than once on clock, even by the same consumer, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Calls to clk_rate_exclusive_put() must be balanced with calls to + * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return + * error status. + */ +void clk_rate_exclusive_put(struct clk *clk) +{ + if (!clk) + return; + + clk_prepare_lock(); + + /* + * if there is something wrong with this consumer protect count, stop + * here before messing with the provider + */ + if (WARN_ON(clk->exclusive_count <= 0)) + goto out; + + clk_core_rate_unprotect(clk->core); + clk->exclusive_count--; +out: + clk_prepare_unlock(); +} +EXPORT_SYMBOL_GPL(clk_rate_exclusive_put); + static void clk_core_rate_protect(struct clk_core *core) { lockdep_assert_held(&prepare_lock); @@ -592,6 +632,38 @@ static void clk_core_rate_restore_protect(struct clk_core *core, int count) core->protect_count = count; } +/** + * clk_rate_exclusive_get - get exclusivity over the clk rate control + * @clk: the clk over which the exclusity of rate control is requested + * + * clk_rate_exlusive_get() begins a critical section during which a clock + * consumer cannot tolerate any other consumer making any operation on the + * clock which could result in a rate change or rate glitch. Exclusive clocks + * cannot have their rate changed, either directly or indirectly due to changes + * further up the parent chain of clocks. As a result, clocks up parent chain + * also get under exclusive control of the calling consumer. + * + * If exlusivity is claimed more than once on clock, even by the same consumer, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Calls to clk_rate_exclusive_get() should be balanced with calls to + * clk_rate_exclusive_put(). Calls to this function may sleep. 
+ * Returns 0 on success, -EERROR otherwise + */ +int clk_rate_exclusive_get(struct clk *clk) +{ + if (!clk) + return 0; + + clk_prepare_lock(); + clk_core_rate_protect(clk->core); + clk->exclusive_count++; + clk_prepare_unlock(); + + return 0; +} +EXPORT_SYMBOL_GPL(clk_rate_exclusive_get); + static void clk_core_unprepare(struct clk_core *core) { lockdep_assert_held(&prepare_lock); @@ -988,6 +1060,12 @@ static int clk_core_determine_round_nolock(struct clk_core *core, if (!core) return 0; + /* + * At this point, core protection will be disabled if + * - if the provider is not protected at all + * - if the calling consumer is the only one which has exclusivity + * over the provider + */ if (clk_core_rate_is_protected(core)) { req->rate = core->rate; } else if (core->ops->determine_rate) { @@ -1104,10 +1182,17 @@ long clk_round_rate(struct clk *clk, unsigned long rate) clk_prepare_lock(); + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); req.rate = rate; ret = clk_core_round_rate_nolock(clk->core, &req); + + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); if (ret) @@ -1843,8 +1928,14 @@ int clk_set_rate(struct clk *clk, unsigned long rate) /* prevent racing with updates to the clock topology */ clk_prepare_lock(); + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + ret = clk_core_set_rate_nolock(clk->core, rate); + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); return ret; @@ -1852,6 +1943,53 @@ int clk_set_rate(struct clk *clk, unsigned long rate) EXPORT_SYMBOL_GPL(clk_set_rate); /** + * clk_set_rate_exclusive - specify a new rate get exclusive control + * @clk: the clk whose rate is being changed + * @rate: the new rate for clk + * + * This is a combination of clk_set_rate() and clk_rate_exclusive_get() + * within a critical section + * + * This can be used initially to ensure that at least 1 consumer is + * statisfied when several consumers are competing for exclusivity over the + * same clock provider. + * + * The exclusivity is not applied if setting the rate failed. + * + * Calls to clk_rate_exclusive_get() should be balanced with calls to + * clk_rate_exclusive_put(). + * + * Returns 0 on success, -EERROR otherwise. 
+ */ +int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) +{ + int ret; + + if (!clk) + return 0; + + /* prevent racing with updates to the clock topology */ + clk_prepare_lock(); + + /* + * The temporary protection removal is not here, on purpose + * This function is meant to be used instead of clk_rate_protect, + * so before the consumer code path protect the clock provider + */ + + ret = clk_core_set_rate_nolock(clk->core, rate); + if (!ret) { + clk_core_rate_protect(clk->core); + clk->exclusive_count++; + } + + clk_prepare_unlock(); + + return ret; +} +EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); + +/** * clk_set_rate_range - set a rate range for a clock source * @clk: clock source * @min: desired minimum clock rate in Hz, inclusive @@ -1875,12 +2013,18 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) clk_prepare_lock(); + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + if (min != clk->min_rate || max != clk->max_rate) { clk->min_rate = min; clk->max_rate = max; ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); } + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); return ret; @@ -2091,8 +2235,16 @@ int clk_set_parent(struct clk *clk, struct clk *parent) return 0; clk_prepare_lock(); + + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + ret = clk_core_set_parent_nolock(clk->core, parent ? parent->core : NULL); + + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); return ret; @@ -2154,7 +2306,15 @@ int clk_set_phase(struct clk *clk, int degrees) degrees += 360; clk_prepare_lock(); + + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + ret = clk_core_set_phase_nolock(clk->core, degrees); + + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); return ret; @@ -3175,6 +3335,18 @@ void __clk_put(struct clk *clk) clk_prepare_lock(); + /* + * Before calling clk_put, all calls to clk_rate_exclusive_get() from a + * given user should be balanced with calls to clk_rate_exclusive_put() + * and by that same consumer + */ + if (WARN_ON(clk->exclusive_count)) { + /* We voiced our concern, let's sanitize the situation */ + clk->core->protect_count -= (clk->exclusive_count - 1); + clk_core_rate_unprotect(clk->core); + clk->exclusive_count = 0; + } + hlist_del(&clk->clks_node); if (clk->min_rate > clk->core->req_rate || clk->max_rate < clk->core->req_rate) -- cgit v1.1 From 6562fbcf3ad5ffa56f1fc79bb1afae909cf3627b Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:52:00 +0100 Subject: clk: fix set_rate_range when current rate is out of range Calling clk_core_set_rate() with core->req_rate is basically a no-op because of the early bail-out mechanism. This may leave the clock in inconsistent state if the rate is out the requested range. Calling clk_core_set_rate() with the closest rate limit could solve the problem but: - The underlying determine_rate() callback needs to account for this corner case (rounding within the range, if possible) - if only round_rate() is available, we rely on luck unfortunately. 
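A hedged consumer-side illustration of the fix (the driver, con_id and rates are invented). Assume the clock currently runs at 48000 Hz, as in the earlier bail-out example:

/* Hypothetical consumer; the "sample" con_id and the 50/60 kHz range are made up. */
static int foo_constrain_sample_clock(struct device *dev)
{
	struct clk *sclk = devm_clk_get(dev, "sample");

	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	/*
	 * Before this fix, the core re-requested req_rate and the early
	 * bail-out left the clock at 48000 Hz, outside the new range.
	 * With this fix, the core requests the nearest boundary (50000 Hz
	 * here) and rolls the range back if that request fails.
	 */
	return clk_set_rate_range(sclk, 50000, 60000);
}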
Fixes: 1c8e600440c7 ("clk: Add rate constraints to clocks") Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-11-jbrunet@baylibre.com --- drivers/clk/clk.c | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 8e728f3..efbc802 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2000,6 +2000,7 @@ EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) { int ret = 0; + unsigned long old_min, old_max, rate; if (!clk) return 0; @@ -2016,10 +2017,38 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) if (clk->exclusive_count) clk_core_rate_unprotect(clk->core); - if (min != clk->min_rate || max != clk->max_rate) { - clk->min_rate = min; - clk->max_rate = max; - ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); + /* Save the current values in case we need to rollback the change */ + old_min = clk->min_rate; + old_max = clk->max_rate; + clk->min_rate = min; + clk->max_rate = max; + + rate = clk_core_get_rate_nolock(clk->core); + if (rate < min || rate > max) { + /* + * FIXME: + * We are in bit of trouble here, current rate is outside the + * the requested range. We are going try to request appropriate + * range boundary but there is a catch. It may fail for the + * usual reason (clock broken, clock protected, etc) but also + * because: + * - round_rate() was not favorable and fell on the wrong + * side of the boundary + * - the determine_rate() callback does not really check for + * this corner case when determining the rate + */ + + if (rate < min) + rate = min; + else + rate = max; + + ret = clk_core_set_rate_nolock(clk->core, rate); + if (ret) { + /* rollback the changes */ + clk->min_rate = old_min; + clk->max_rate = old_max; + } } if (clk->exclusive_count) -- cgit v1.1
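To close the series, a hedged consumer-side sketch of the exclusive-rate API added by the clk_rate_exclusive patch above (the driver, clock and 24.576 MHz rate are invented); it shows the combined clk_set_rate_exclusive() helper and the balancing clk_rate_exclusive_put() call the kernel-doc requires.

/* Hypothetical audio driver claiming exclusive control of its master clock. */
static int foo_audio_start(struct clk *mclk)
{
	int ret;

	/* set the rate and claim exclusivity in one critical section */
	ret = clk_set_rate_exclusive(mclk, 24576000);
	if (ret)
		return ret;

	/*
	 * From here on, other consumers cannot change or glitch mclk's rate:
	 * clk_core_set_rate_nolock() and clk_core_set_parent_nolock() return
	 * -EBUSY while the provider (and its parents) are protected, whereas
	 * this consumer may still adjust the rate through its own exclusive
	 * reference.
	 */
	return 0;
}

static void foo_audio_stop(struct clk *mclk)
{
	/* balance the exclusivity taken by clk_set_rate_exclusive() */
	clk_rate_exclusive_put(mclk);
}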