From 29fd2a34ef8d863e48183bd473ba57c8d7839e25 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Tue, 19 Dec 2017 09:33:29 +0100 Subject: clk: check ops pointer on clock register Nothing really prevents a provider from (trying to) register a clock without providing the clock ops structure. We do check the individual fields before using them, but not the structure pointer itself. This may have the usual nasty consequences when the pointer is dereferenced, most likely when checking one the field during the initialization. This is fixed by returning an error on clock register if the ops pointer is NULL. Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171219083329.24746-1-jbrunet@baylibre.com --- drivers/clk/clk.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 647d056..e3e98ac 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2678,7 +2678,13 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) ret = -ENOMEM; goto fail_name; } + + if (WARN_ON(!hw->init->ops)) { + ret = -EINVAL; + goto fail_ops; + } core->ops = hw->init->ops; + if (dev && pm_runtime_enabled(dev)) core->dev = dev; if (dev && dev->driver) @@ -2740,6 +2746,7 @@ fail_parent_names_copy: kfree_const(core->parent_names[i]); kfree(core->parent_names); fail_parent_names: +fail_ops: kfree_const(core->name); fail_name: kfree(core); -- cgit v1.1 From 56e7ceddbeb812a6f79f37523eb4e6e4620f4416 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:51 +0100 Subject: clk: fix incorrect usage of ENOSYS ENOSYS is special and should only be used for incorrect syscall number. It does not seem to be the case here. Reported by checkpatch.pl while working on clock protection. Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-2-jbrunet@baylibre.com --- drivers/clk/clk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 647d056..5fe9e63 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1888,7 +1888,7 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent) /* verify ops for for multi-parent clks */ if ((core->num_parents > 1) && (!core->ops->set_parent)) { - ret = -ENOSYS; + ret = -EPERM; goto out; } -- cgit v1.1 From 91baa9ffe6d4e8191ef55b4ece3e556294141515 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:52 +0100 Subject: clk: take the prepare lock out of clk_core_set_parent Rework set_parent core function so it can be called when the prepare lock is already held by the caller. This rework is done to ease the integration of the "protected" clock functionality. 
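In short, the locking moves out of the core helper and into the consumer-facing wrapper, so internal callers that already hold the prepare lock (such as clk_unregister()) can reparent directly. A condensed sketch of the resulting shape, with the actual reparenting logic elided (see the diff below for the real code):

static int clk_core_set_parent_nolock(struct clk_core *core,
                                      struct clk_core *parent)
{
        lockdep_assert_held(&prepare_lock);     /* callers now own the lock */

        if (!core)
                return 0;

        /* topology checks and the actual switch happen here, unchanged */
        return 0;
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        int ret;

        if (!clk)
                return 0;

        clk_prepare_lock();                     /* the only locking point left */
        ret = clk_core_set_parent_nolock(clk->core,
                                         parent ? parent->core : NULL);
        clk_prepare_unlock();

        return ret;
}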
Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-3-jbrunet@baylibre.com --- drivers/clk/clk.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 5fe9e63..e60b2a2 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1871,32 +1871,28 @@ bool clk_has_parent(struct clk *clk, struct clk *parent) } EXPORT_SYMBOL_GPL(clk_has_parent); -static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent) +static int clk_core_set_parent_nolock(struct clk_core *core, + struct clk_core *parent) { int ret = 0; int p_index = 0; unsigned long p_rate = 0; + lockdep_assert_held(&prepare_lock); + if (!core) return 0; - /* prevent racing with updates to the clock topology */ - clk_prepare_lock(); - if (core->parent == parent) - goto out; + return 0; /* verify ops for for multi-parent clks */ - if ((core->num_parents > 1) && (!core->ops->set_parent)) { - ret = -EPERM; - goto out; - } + if (core->num_parents > 1 && !core->ops->set_parent) + return -EPERM; /* check that we are allowed to re-parent if the clock is in use */ - if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { - ret = -EBUSY; - goto out; - } + if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) + return -EBUSY; /* try finding the new parent index */ if (parent) { @@ -1904,15 +1900,14 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent) if (p_index < 0) { pr_debug("%s: clk %s can not be parent of clk %s\n", __func__, parent->name, core->name); - ret = p_index; - goto out; + return p_index; } p_rate = parent->rate; } ret = clk_pm_runtime_get(core); if (ret) - goto out; + return ret; /* propagate PRE_RATE_CHANGE notifications */ ret = __clk_speculate_rates(core, p_rate); @@ -1934,8 +1929,6 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent) runtime_put: clk_pm_runtime_put(core); -out: - clk_prepare_unlock(); return ret; } @@ -1959,10 +1952,17 @@ out: */ int clk_set_parent(struct clk *clk, struct clk *parent) { + int ret; + if (!clk) return 0; - return clk_core_set_parent(clk->core, parent ? parent->core : NULL); + clk_prepare_lock(); + ret = clk_core_set_parent_nolock(clk->core, + parent ? parent->core : NULL); + clk_prepare_unlock(); + + return ret; } EXPORT_SYMBOL_GPL(clk_set_parent); @@ -2851,7 +2851,7 @@ void clk_unregister(struct clk *clk) /* Reparent all children to the orphan list. */ hlist_for_each_entry_safe(child, t, &clk->core->children, child_node) - clk_core_set_parent(child, NULL); + clk_core_set_parent_nolock(child, NULL); } hlist_del_init(&clk->core->child_node); -- cgit v1.1 From 9e4d04adeb1a957be212aace20ac5cee6f14013c Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:53 +0100 Subject: clk: add clk_core_set_phase_nolock function Create a core function for set_phase, as it is done for set_rate and set_parent. This rework is done to ease the integration of "protected" clock functionality. 
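As a consumer-side note, clk_set_phase() still normalizes the requested shift into [0, 360) before handing it to the new nolock helper, so negative values remain fine. A hypothetical user, with the device connection id invented for illustration:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_setup_sample_phase(struct device *dev)
{
        struct clk *sclk;
        int ret;

        sclk = devm_clk_get(dev, "sample");     /* "sample" is made up */
        if (IS_ERR(sclk))
                return PTR_ERR(sclk);

        /* -90 is normalized to 270 by the core before .set_phase runs */
        ret = clk_set_phase(sclk, -90);
        if (ret)
                dev_warn(dev, "cannot shift sample clock phase: %d\n", ret);

        return ret;
}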
Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-4-jbrunet@baylibre.com --- drivers/clk/clk.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index e60b2a2..7946a06 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1966,6 +1966,25 @@ int clk_set_parent(struct clk *clk, struct clk *parent) } EXPORT_SYMBOL_GPL(clk_set_parent); +static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) +{ + int ret = -EINVAL; + + lockdep_assert_held(&prepare_lock); + + if (!core) + return 0; + + trace_clk_set_phase(core, degrees); + + if (core->ops->set_phase) + ret = core->ops->set_phase(core->hw, degrees); + + trace_clk_set_phase_complete(core, degrees); + + return ret; +} + /** * clk_set_phase - adjust the phase shift of a clock signal * @clk: clock signal source @@ -1988,7 +2007,7 @@ EXPORT_SYMBOL_GPL(clk_set_parent); */ int clk_set_phase(struct clk *clk, int degrees) { - int ret = -EINVAL; + int ret; if (!clk) return 0; @@ -1999,17 +2018,7 @@ int clk_set_phase(struct clk *clk, int degrees) degrees += 360; clk_prepare_lock(); - - trace_clk_set_phase(clk->core, degrees); - - if (clk->core->ops->set_phase) - ret = clk->core->ops->set_phase(clk->core->hw, degrees); - - trace_clk_set_phase_complete(clk->core, degrees); - - if (!ret) - clk->core->phase = degrees; - + ret = clk_core_set_phase_nolock(clk->core, degrees); clk_prepare_unlock(); return ret; -- cgit v1.1 From 0f6cc2b8e94da5400528c0ba7fd910392ec598a2 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:54 +0100 Subject: clk: rework calls to round and determine rate callbacks Rework the way the callbacks round_rate() and determine_rate() are called. The goal is to do this at a single point and make it easier to add conditions before calling them. Because of this factorization, rate returned by determine_rate() is also checked against the min and max rate values This rework is done to ease the integration of "protected" clock functionality. 
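For providers, either callback still works and .determine_rate is preferred when both are set; the new clk_core_can_round() merely checks that at least one of them exists. Minimal illustrative skeletons (the 100 Hz step and the names are invented):

#include <linux/clk-provider.h>
#include <linux/kernel.h>

/* legacy form: return the closest rate the hardware supports */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long *parent_rate)
{
        return rounddown(rate, 100);
}

/* preferred form: adjust the request in place, may also pick a parent */
static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
        req->rate = rounddown(req->rate, 100);
        return 0;
}

static const struct clk_ops foo_ops = {
        .determine_rate = foo_determine_rate,   /* wins if both are set */
        .round_rate     = foo_round_rate,
};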
Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-5-jbrunet@baylibre.com --- drivers/clk/clk.c | 82 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 52 insertions(+), 30 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 7946a06..322d9ba 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -905,10 +905,9 @@ static int clk_disable_unused(void) } late_initcall_sync(clk_disable_unused); -static int clk_core_round_rate_nolock(struct clk_core *core, - struct clk_rate_request *req) +static int clk_core_determine_round_nolock(struct clk_core *core, + struct clk_rate_request *req) { - struct clk_core *parent; long rate; lockdep_assert_held(&prepare_lock); @@ -916,15 +915,6 @@ static int clk_core_round_rate_nolock(struct clk_core *core, if (!core) return 0; - parent = core->parent; - if (parent) { - req->best_parent_hw = parent->hw; - req->best_parent_rate = parent->rate; - } else { - req->best_parent_hw = NULL; - req->best_parent_rate = 0; - } - if (core->ops->determine_rate) { return core->ops->determine_rate(core->hw, req); } else if (core->ops->round_rate) { @@ -934,15 +924,58 @@ static int clk_core_round_rate_nolock(struct clk_core *core, return rate; req->rate = rate; - } else if (core->flags & CLK_SET_RATE_PARENT) { - return clk_core_round_rate_nolock(parent, req); } else { - req->rate = core->rate; + return -EINVAL; } return 0; } +static void clk_core_init_rate_req(struct clk_core * const core, + struct clk_rate_request *req) +{ + struct clk_core *parent; + + if (WARN_ON(!core || !req)) + return; + + parent = core->parent; + if (parent) { + req->best_parent_hw = parent->hw; + req->best_parent_rate = parent->rate; + } else { + req->best_parent_hw = NULL; + req->best_parent_rate = 0; + } +} + +static bool clk_core_can_round(struct clk_core * const core) +{ + if (core->ops->determine_rate || core->ops->round_rate) + return true; + + return false; +} + +static int clk_core_round_rate_nolock(struct clk_core *core, + struct clk_rate_request *req) +{ + lockdep_assert_held(&prepare_lock); + + if (!core) + return 0; + + clk_core_init_rate_req(core, req); + + if (clk_core_can_round(core)) + return clk_core_determine_round_nolock(core, req); + else if (core->flags & CLK_SET_RATE_PARENT) + return clk_core_round_rate_nolock(core->parent, req); + + req->rate = core->rate; + return 0; +} + /** * __clk_determine_rate - get the closest rate actually supported by a clock * @hw: determine the rate of this clock @@ -1432,34 +1465,23 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core, clk_core_get_boundaries(core, &min_rate, &max_rate); /* find the closest rate and parent clk/rate */ - if (core->ops->determine_rate) { + if (clk_core_can_round(core)) { struct clk_rate_request req; req.rate = rate; req.min_rate = min_rate; req.max_rate = max_rate; - if (parent) { - req.best_parent_hw = parent->hw; - req.best_parent_rate = parent->rate; - } else { - req.best_parent_hw = NULL; - req.best_parent_rate = 0; - } - ret = core->ops->determine_rate(core->hw, &req); + clk_core_init_rate_req(core, &req); + + ret = clk_core_determine_round_nolock(core, &req); if (ret < 0) return NULL; best_parent_rate = req.best_parent_rate; new_rate = req.rate; parent = req.best_parent_hw ? 
req.best_parent_hw->core : NULL; - } else if (core->ops->round_rate) { - ret = core->ops->round_rate(core->hw, rate, - &best_parent_rate); - if (ret < 0) - return NULL; - new_rate = ret; if (new_rate < min_rate || new_rate > max_rate) return NULL; } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { -- cgit v1.1 From ca5e089a32c5ffba6c5101fdabdd6dea18041c34 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:55 +0100 Subject: clk: use round rate to bail out early in set_rate The current implementation of clk_core_set_rate_nolock() bails out early if the requested rate is exactly the same as the one set. It should bail out if the request would not result in a rate a change. This is important when the rate is not exactly what is requested, which is fairly common with PLLs. Ex: provider able to give any rate with steps of 100Hz - 1st consumer request 48000Hz and gets it. - 2nd consumer request 48010Hz as well. If we were to perform the usual mechanism, we would get 48000Hz as well. The clock would not change so there is no point performing any checks to make sure the clock can change, we know it won't. This is important to prepare the addition of the clock protection mechanism Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-6-jbrunet@baylibre.com --- drivers/clk/clk.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 322d9ba..bbe90ba 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1658,16 +1658,37 @@ static void clk_change_rate(struct clk_core *core) clk_change_rate(core->new_child); } +static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, + unsigned long req_rate) +{ + int ret; + struct clk_rate_request req; + + lockdep_assert_held(&prepare_lock); + + if (!core) + return 0; + + clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); + req.rate = req_rate; + + ret = clk_core_round_rate_nolock(core, &req); + + return ret ? 0 : req.rate; +} + static int clk_core_set_rate_nolock(struct clk_core *core, unsigned long req_rate) { struct clk_core *top, *fail_clk; - unsigned long rate = req_rate; + unsigned long rate; int ret = 0; if (!core) return 0; + rate = clk_core_req_round_rate_nolock(core, req_rate); + /* bail early if nothing to do */ if (rate == clk_core_get_rate_nolock(core)) return 0; @@ -1676,7 +1697,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core, return -EBUSY; /* calculate new rates and get the topmost changed clock */ - top = clk_calc_new_rates(core, rate); + top = clk_calc_new_rates(core, req_rate); if (!top) return -EINVAL; -- cgit v1.1 From e55a839a7a1c561b7d2fbd9cc50b7d40dd2b3361 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:56 +0100 Subject: clk: add clock protection mechanism to clk core The patch adds clk_core_protect and clk_core_unprotect to the internal CCF API. These functions allow to set a new constraint along the clock tree to prevent any change, even indirect, which may result in rate change or glitch. 
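The counting scheme mirrors prepare_count: only a clock's first protection request walks up to the parent, so each protected leaf pins the whole chain exactly once. A condensed copy of the helper from the diff below, with a worked example in the comment:

static void clk_core_rate_protect(struct clk_core *core)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        /*
         * Example, chain pll -> div -> out: protecting "out" twice and
         * "div" once ends with protect_count out=2, div=2, pll=1, since
         * only the 0 -> 1 transition is propagated to the parent.
         */
        if (core->protect_count == 0)
                clk_core_rate_protect(core->parent);

        core->protect_count++;
}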
Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-7-jbrunet@baylibre.com --- drivers/clk/clk.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 112 insertions(+), 7 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index bbe90ba..f69a217 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -62,6 +62,7 @@ struct clk_core { bool orphan; unsigned int enable_count; unsigned int prepare_count; + unsigned int protect_count; unsigned long min_rate; unsigned long max_rate; unsigned long accuracy; @@ -170,6 +171,11 @@ static void clk_enable_unlock(unsigned long flags) spin_unlock_irqrestore(&enable_lock, flags); } +static bool clk_core_rate_is_protected(struct clk_core *core) +{ + return core->protect_count; +} + static bool clk_core_is_prepared(struct clk_core *core) { bool ret = false; @@ -381,6 +387,11 @@ bool clk_hw_is_prepared(const struct clk_hw *hw) return clk_core_is_prepared(hw->core); } +bool clk_hw_rate_is_protected(const struct clk_hw *hw) +{ + return clk_core_rate_is_protected(hw->core); +} + bool clk_hw_is_enabled(const struct clk_hw *hw) { return clk_core_is_enabled(hw->core); @@ -519,6 +530,68 @@ EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); /*** clk api ***/ +static void clk_core_rate_unprotect(struct clk_core *core) +{ + lockdep_assert_held(&prepare_lock); + + if (!core) + return; + + if (WARN_ON(core->protect_count == 0)) + return; + + if (--core->protect_count > 0) + return; + + clk_core_rate_unprotect(core->parent); +} + +static int clk_core_rate_nuke_protect(struct clk_core *core) +{ + int ret; + + lockdep_assert_held(&prepare_lock); + + if (!core) + return -EINVAL; + + if (core->protect_count == 0) + return 0; + + ret = core->protect_count; + core->protect_count = 1; + clk_core_rate_unprotect(core); + + return ret; +} + +static void clk_core_rate_protect(struct clk_core *core) +{ + lockdep_assert_held(&prepare_lock); + + if (!core) + return; + + if (core->protect_count == 0) + clk_core_rate_protect(core->parent); + + core->protect_count++; +} + +static void clk_core_rate_restore_protect(struct clk_core *core, int count) +{ + lockdep_assert_held(&prepare_lock); + + if (!core) + return; + + if (count == 0) + return; + + clk_core_rate_protect(core); + core->protect_count = count; +} + static void clk_core_unprepare(struct clk_core *core) { lockdep_assert_held(&prepare_lock); @@ -915,7 +988,9 @@ static int clk_core_determine_round_nolock(struct clk_core *core, if (!core) return 0; - if (core->ops->determine_rate) { + if (clk_core_rate_is_protected(core)) { + req->rate = core->rate; + } else if (core->ops->determine_rate) { return core->ops->determine_rate(core->hw, req); } else if (core->ops->round_rate) { rate = core->ops->round_rate(core->hw, req->rate, @@ -1661,7 +1736,7 @@ static void clk_change_rate(struct clk_core *core) static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, unsigned long req_rate) { - int ret; + int ret, cnt; struct clk_rate_request req; lockdep_assert_held(&prepare_lock); @@ -1669,11 +1744,19 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, if (!core) return 0; + /* simulate what the rate would be if it could be freely set */ + cnt = clk_core_rate_nuke_protect(core); + if (cnt < 0) + return cnt; + clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); req.rate = req_rate; ret = 
clk_core_round_rate_nolock(core, &req); + /* restore the protection */ + clk_core_rate_restore_protect(core, cnt); + return ret ? 0 : req.rate; } @@ -1693,6 +1776,10 @@ static int clk_core_set_rate_nolock(struct clk_core *core, if (rate == clk_core_get_rate_nolock(core)) return 0; + /* fail on a direct rate set of a protected provider */ + if (clk_core_rate_is_protected(core)) + return -EBUSY; + if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count) return -EBUSY; @@ -1937,6 +2024,9 @@ static int clk_core_set_parent_nolock(struct clk_core *core, if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) return -EBUSY; + if (clk_core_rate_is_protected(core)) + return -EBUSY; + /* try finding the new parent index */ if (parent) { p_index = clk_fetch_parent_index(core, parent); @@ -2018,6 +2108,9 @@ static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) if (!core) return 0; + if (clk_core_rate_is_protected(core)) + return -EBUSY; + trace_clk_set_phase(core, degrees); if (core->ops->set_phase) @@ -2148,11 +2241,12 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, if (!c) return; - seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", + seq_printf(s, "%*s%-*s %11d %12d %12d %11lu %10lu %-3d\n", level * 3 + 1, "", 30 - level * 3, c->name, - c->enable_count, c->prepare_count, clk_core_get_rate(c), - clk_core_get_accuracy(c), clk_core_get_phase(c)); + c->enable_count, c->prepare_count, c->protect_count, + clk_core_get_rate(c), clk_core_get_accuracy(c), + clk_core_get_phase(c)); } static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, @@ -2174,8 +2268,8 @@ static int clk_summary_show(struct seq_file *s, void *data) struct clk_core *c; struct hlist_head **lists = (struct hlist_head **)s->private; - seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); - seq_puts(s, "----------------------------------------------------------------------------------------\n"); + seq_puts(s, " clock enable_cnt prepare_cnt protect_cnt rate accuracy phase\n"); + seq_puts(s, "----------------------------------------------------------------------------------------------------\n"); clk_prepare_lock(); @@ -2210,6 +2304,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) seq_printf(s, "\"%s\": { ", c->name); seq_printf(s, "\"enable_count\": %d,", c->enable_count); seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); + seq_printf(s, "\"protect_count\": %d,", c->protect_count); seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); @@ -2340,6 +2435,11 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) if (!d) goto err_out; + d = debugfs_create_u32("clk_protect_count", S_IRUGO, core->dentry, + (u32 *)&core->protect_count); + if (!d) + goto err_out; + d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, (u32 *)&core->notifier_count); if (!d) @@ -2911,6 +3011,11 @@ void clk_unregister(struct clk *clk) if (clk->core->prepare_count) pr_warn("%s: unregistering prepared clock: %s\n", __func__, clk->core->name); + + if (clk->core->protect_count) + pr_warn("%s: unregistering protected clock: %s\n", + __func__, clk->core->name); + kref_put(&clk->core->ref, __clk_release); unlock: clk_prepare_unlock(); -- cgit v1.1 From c5ce26edb4fec178232b9cb37f334ec574931514 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:57 +0100 Subject: 
clk: cosmetic changes to clk_summary debugfs entry clk_summary debugfs entry was already well over the traditional 80 characters per line limit but it grew even larger with the addition of clock protection. clock enable_cnt prepare_cnt protect_cnt rate accuracy phase ---------------------------------------------------------------------------------------------------- wifi32k 1 1 0 32768 0 0 vcpu 0 0 0 2016000000 0 0 xtal 5 5 0 24000000 0 0 This patch reduce the width a bit: enable prepare protect clock count count count rate accuracy phase ---------------------------------------------------------------------------------------- wifi32k 1 1 0 32768 0 0 vcpu 0 0 0 2016000000 0 0 xtal 5 5 0 24000000 0 0 Acked-by: Linus Walleij Tested-by: Quentin Schulz Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-8-jbrunet@baylibre.com --- drivers/clk/clk.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f69a217..f6fe5e5 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2241,7 +2241,7 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, if (!c) return; - seq_printf(s, "%*s%-*s %11d %12d %12d %11lu %10lu %-3d\n", + seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n", level * 3 + 1, "", 30 - level * 3, c->name, c->enable_count, c->prepare_count, c->protect_count, @@ -2268,8 +2268,9 @@ static int clk_summary_show(struct seq_file *s, void *data) struct clk_core *c; struct hlist_head **lists = (struct hlist_head **)s->private; - seq_puts(s, " clock enable_cnt prepare_cnt protect_cnt rate accuracy phase\n"); - seq_puts(s, "----------------------------------------------------------------------------------------------------\n"); + seq_puts(s, " enable prepare protect \n"); + seq_puts(s, " clock count count count rate accuracy phase\n"); + seq_puts(s, "----------------------------------------------------------------------------------------\n"); clk_prepare_lock(); -- cgit v1.1 From 55e9b8b7b806ec3f9a8817e13596682a5981c19c Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:51:59 +0100 Subject: clk: add clk_rate_exclusive api Using clock rate protection, we can now provide a way for clock consumer to claim exclusive control over the rate of a producer So far, rate change operations have been a "last write wins" affair. This changes allows drivers to explicitly protect against this behavior, if required. 
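A hypothetical consumer (driver, clock and rate are all invented here) would use the new calls roughly as below; clk_rate_exclusive_get() is the claim-only variant, while clk_set_rate_exclusive() sets the rate and claims exclusivity in one critical section. Any rate change attempted by another consumer in between fails with -EBUSY.

#include <linux/clk.h>

/* pin the master clock for the duration of a capture session */
static int foo_start_capture(struct clk *mclk)
{
        /* set the rate and take exclusivity over it in one go */
        return clk_set_rate_exclusive(mclk, 24576000);
}

static void foo_stop_capture(struct clk *mclk)
{
        /* every successful exclusive get/set must be balanced by a put */
        clk_rate_exclusive_put(mclk);
}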
Of course, if exclusivity over a producer is claimed more than once, the rate is effectively locked as exclusivity cannot be preempted Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-10-jbrunet@baylibre.com --- drivers/clk/clk.c | 172 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 172 insertions(+) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f6fe5e5..8e728f3 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -87,6 +87,7 @@ struct clk { const char *con_id; unsigned long min_rate; unsigned long max_rate; + unsigned int exclusive_count; struct hlist_node clks_node; }; @@ -565,6 +566,45 @@ static int clk_core_rate_nuke_protect(struct clk_core *core) return ret; } +/** + * clk_rate_exclusive_put - release exclusivity over clock rate control + * @clk: the clk over which the exclusivity is released + * + * clk_rate_exclusive_put() completes a critical section during which a clock + * consumer cannot tolerate any other consumer making any operation on the + * clock which could result in a rate change or rate glitch. Exclusive clocks + * cannot have their rate changed, either directly or indirectly due to changes + * further up the parent chain of clocks. As a result, clocks up parent chain + * also get under exclusive control of the calling consumer. + * + * If exlusivity is claimed more than once on clock, even by the same consumer, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Calls to clk_rate_exclusive_put() must be balanced with calls to + * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return + * error status. + */ +void clk_rate_exclusive_put(struct clk *clk) +{ + if (!clk) + return; + + clk_prepare_lock(); + + /* + * if there is something wrong with this consumer protect count, stop + * here before messing with the provider + */ + if (WARN_ON(clk->exclusive_count <= 0)) + goto out; + + clk_core_rate_unprotect(clk->core); + clk->exclusive_count--; +out: + clk_prepare_unlock(); +} +EXPORT_SYMBOL_GPL(clk_rate_exclusive_put); + static void clk_core_rate_protect(struct clk_core *core) { lockdep_assert_held(&prepare_lock); @@ -592,6 +632,38 @@ static void clk_core_rate_restore_protect(struct clk_core *core, int count) core->protect_count = count; } +/** + * clk_rate_exclusive_get - get exclusivity over the clk rate control + * @clk: the clk over which the exclusity of rate control is requested + * + * clk_rate_exlusive_get() begins a critical section during which a clock + * consumer cannot tolerate any other consumer making any operation on the + * clock which could result in a rate change or rate glitch. Exclusive clocks + * cannot have their rate changed, either directly or indirectly due to changes + * further up the parent chain of clocks. As a result, clocks up parent chain + * also get under exclusive control of the calling consumer. + * + * If exlusivity is claimed more than once on clock, even by the same consumer, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Calls to clk_rate_exclusive_get() should be balanced with calls to + * clk_rate_exclusive_put(). Calls to this function may sleep. 
+ * Returns 0 on success, -EERROR otherwise + */ +int clk_rate_exclusive_get(struct clk *clk) +{ + if (!clk) + return 0; + + clk_prepare_lock(); + clk_core_rate_protect(clk->core); + clk->exclusive_count++; + clk_prepare_unlock(); + + return 0; +} +EXPORT_SYMBOL_GPL(clk_rate_exclusive_get); + static void clk_core_unprepare(struct clk_core *core) { lockdep_assert_held(&prepare_lock); @@ -988,6 +1060,12 @@ static int clk_core_determine_round_nolock(struct clk_core *core, if (!core) return 0; + /* + * At this point, core protection will be disabled if + * - if the provider is not protected at all + * - if the calling consumer is the only one which has exclusivity + * over the provider + */ if (clk_core_rate_is_protected(core)) { req->rate = core->rate; } else if (core->ops->determine_rate) { @@ -1104,10 +1182,17 @@ long clk_round_rate(struct clk *clk, unsigned long rate) clk_prepare_lock(); + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); req.rate = rate; ret = clk_core_round_rate_nolock(clk->core, &req); + + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); if (ret) @@ -1843,8 +1928,14 @@ int clk_set_rate(struct clk *clk, unsigned long rate) /* prevent racing with updates to the clock topology */ clk_prepare_lock(); + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + ret = clk_core_set_rate_nolock(clk->core, rate); + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); return ret; @@ -1852,6 +1943,53 @@ int clk_set_rate(struct clk *clk, unsigned long rate) EXPORT_SYMBOL_GPL(clk_set_rate); /** + * clk_set_rate_exclusive - specify a new rate get exclusive control + * @clk: the clk whose rate is being changed + * @rate: the new rate for clk + * + * This is a combination of clk_set_rate() and clk_rate_exclusive_get() + * within a critical section + * + * This can be used initially to ensure that at least 1 consumer is + * statisfied when several consumers are competing for exclusivity over the + * same clock provider. + * + * The exclusivity is not applied if setting the rate failed. + * + * Calls to clk_rate_exclusive_get() should be balanced with calls to + * clk_rate_exclusive_put(). + * + * Returns 0 on success, -EERROR otherwise. 
+ */ +int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) +{ + int ret; + + if (!clk) + return 0; + + /* prevent racing with updates to the clock topology */ + clk_prepare_lock(); + + /* + * The temporary protection removal is not here, on purpose + * This function is meant to be used instead of clk_rate_protect, + * so before the consumer code path protect the clock provider + */ + + ret = clk_core_set_rate_nolock(clk->core, rate); + if (!ret) { + clk_core_rate_protect(clk->core); + clk->exclusive_count++; + } + + clk_prepare_unlock(); + + return ret; +} +EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); + +/** * clk_set_rate_range - set a rate range for a clock source * @clk: clock source * @min: desired minimum clock rate in Hz, inclusive @@ -1875,12 +2013,18 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) clk_prepare_lock(); + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + if (min != clk->min_rate || max != clk->max_rate) { clk->min_rate = min; clk->max_rate = max; ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); } + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); return ret; @@ -2091,8 +2235,16 @@ int clk_set_parent(struct clk *clk, struct clk *parent) return 0; clk_prepare_lock(); + + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + ret = clk_core_set_parent_nolock(clk->core, parent ? parent->core : NULL); + + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); return ret; @@ -2154,7 +2306,15 @@ int clk_set_phase(struct clk *clk, int degrees) degrees += 360; clk_prepare_lock(); + + if (clk->exclusive_count) + clk_core_rate_unprotect(clk->core); + ret = clk_core_set_phase_nolock(clk->core, degrees); + + if (clk->exclusive_count) + clk_core_rate_protect(clk->core); + clk_prepare_unlock(); return ret; @@ -3175,6 +3335,18 @@ void __clk_put(struct clk *clk) clk_prepare_lock(); + /* + * Before calling clk_put, all calls to clk_rate_exclusive_get() from a + * given user should be balanced with calls to clk_rate_exclusive_put() + * and by that same consumer + */ + if (WARN_ON(clk->exclusive_count)) { + /* We voiced our concern, let's sanitize the situation */ + clk->core->protect_count -= (clk->exclusive_count - 1); + clk_core_rate_unprotect(clk->core); + clk->exclusive_count = 0; + } + hlist_del(&clk->clks_node); if (clk->min_rate > clk->core->req_rate || clk->max_rate < clk->core->req_rate) -- cgit v1.1 From 6562fbcf3ad5ffa56f1fc79bb1afae909cf3627b Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 1 Dec 2017 22:52:00 +0100 Subject: clk: fix set_rate_range when current rate is out of range Calling clk_core_set_rate() with core->req_rate is basically a no-op because of the early bail-out mechanism. This may leave the clock in inconsistent state if the rate is out the requested range. Calling clk_core_set_rate() with the closest rate limit could solve the problem but: - The underlying determine_rate() callback needs to account for this corner case (rounding within the range, if possible) - if only round_rate() is available, we rely on luck unfortunately. 
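In consumer terms, narrowing a range now actively pulls an out-of-range clock to the nearest boundary instead of leaving it where it was, subject to the round_rate()/determine_rate() caveats above. A hedged sketch with invented values:

#include <linux/clk.h>

static int foo_cap_bus_clock(struct clk *bus)
{
        /*
         * If "bus" currently runs at, say, 300 MHz, this now requests
         * 200 MHz (the closest boundary) rather than silently leaving
         * the clock out of range.
         */
        return clk_set_rate_range(bus, 100000000, 200000000);
}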
Fixes: 1c8e600440c7 ("clk: Add rate constraints to clocks") Tested-by: Maxime Ripard Acked-by: Michael Turquette Signed-off-by: Jerome Brunet Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171201215200.23523-11-jbrunet@baylibre.com --- drivers/clk/clk.c | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 8e728f3..efbc802 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2000,6 +2000,7 @@ EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) { int ret = 0; + unsigned long old_min, old_max, rate; if (!clk) return 0; @@ -2016,10 +2017,38 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) if (clk->exclusive_count) clk_core_rate_unprotect(clk->core); - if (min != clk->min_rate || max != clk->max_rate) { - clk->min_rate = min; - clk->max_rate = max; - ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); + /* Save the current values in case we need to rollback the change */ + old_min = clk->min_rate; + old_max = clk->max_rate; + clk->min_rate = min; + clk->max_rate = max; + + rate = clk_core_get_rate_nolock(clk->core); + if (rate < min || rate > max) { + /* + * FIXME: + * We are in bit of trouble here, current rate is outside the + * the requested range. We are going try to request appropriate + * range boundary but there is a catch. It may fail for the + * usual reason (clock broken, clock protected, etc) but also + * because: + * - round_rate() was not favorable and fell on the wrong + * side of the boundary + * - the determine_rate() callback does not really check for + * this corner case when determining the rate + */ + + if (rate < min) + rate = min; + else + rate = max; + + ret = clk_core_set_rate_nolock(clk->core, rate); + if (ret) { + /* rollback the changes */ + clk->min_rate = old_min; + clk->max_rate = old_max; + } } if (clk->exclusive_count) -- cgit v1.1 From f8f8f1d04494d3a6546bee3f0618c4dba31d7b72 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 2 Nov 2017 00:36:09 -0700 Subject: clk: Don't touch hardware when reparenting during registration The orphan clocks reparent operation shouldn't touch the hardware if clocks are enabled, otherwise it may get a chance to disable a newly registered critical clock which triggers the warning below. Assuming we have two clocks: A and B, B is the parent of A. Clock A has flag: CLK_OPS_PARENT_ENABLE Clock B has flag: CLK_IS_CRITICAL Step 1: Clock A is registered, then it becomes orphan. Step 2: Clock B is registered. Before clock B reach the critical clock enable operation, orphan A will find the newly registered parent B and do reparent operation, then parent B will be finally disabled in __clk_set_parent_after() due to CLK_OPS_PARENT_ENABLE flag as there's still no users of B which will then trigger the following warning. 
WARNING: CPU: 0 PID: 0 at drivers/clk/clk.c:597 clk_core_disable+0xb4/0xe0 Modules linked in: CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.11.0-rc1-00056-gdff1f66-dirty #1373 Hardware name: Generic DT based system Backtrace: [] (dump_backtrace) from [] (show_stack+0x18/0x1c) r6:600000d3 r5:00000000 r4:c0e26358 r3:00000000 [] (show_stack) from [] (dump_stack+0xb4/0xe8) [] (dump_stack) from [] (__warn+0xd8/0x104) r10:c0c21cd0 r9:c048aa78 r8:00000255 r7:00000009 r6:c0c1cd90 r5:00000000 r4:00000000 r3:c0e01d34 [] (__warn) from [] (warn_slowpath_null+0x28/0x30) r9:00000000 r8:ef00bf80 r7:c165ac4c r6:ef00bf80 r5:ef00bf80 r4:ef00bf80 [] (warn_slowpath_null) from [] (clk_core_disable+0xb4/0xe0) [] (clk_core_disable) from [] (clk_core_disable_lock+0x20/0x2c) r4:000000d3 r3:c0e0af00 [] (clk_core_disable_lock) from [] (clk_core_disable_unprepare+0x14/0x28) r5:00000000 r4:ef00bf80 [] (clk_core_disable_unprepare) from [] (__clk_set_parent_after+0x38/0x54) r4:ef00bd80 r3:000010a0 [] (__clk_set_parent_after) from [] (clk_register+0x4d0/0x648) r6:ef00d500 r5:ef00bf80 r4:ef00bd80 r3:ef00bfd4 [] (clk_register) from [] (clk_hw_register+0x10/0x1c) r9:00000000 r8:00000003 r7:00000000 r6:00000824 r5:00000001 r4:ef00d500 [] (clk_hw_register) from [] (_register_divider+0xcc/0x120) [] (_register_divider) from [] (clk_register_divider+0x44/0x54) r10:00000004 r9:00000003 r8:00000001 r7:00000000 r6:00000003 r5:00000001 r4:f0810030 [] (clk_register_divider) from [] (imx7ulp_clocks_init+0x558/0xe98) r7:c0e296f8 r6:c165c808 r5:00000000 r4:c165c808 [] (imx7ulp_clocks_init) from [] (of_clk_init+0x118/0x1e0) r10:00000001 r9:c0e01f68 r8:00000000 r7:c0e01f60 r6:ef7f8974 r5:ef0035c0 r4:00000006 [] (of_clk_init) from [] (time_init+0x2c/0x38) r10:efffed40 r9:c0d61a48 r8:c0e78000 r7:c0e07900 r6:ffffffff r5:c0e78000 r4:00000000 [] (time_init) from [] (start_kernel+0x218/0x394) [] (start_kernel) from [<6000807c>] (0x6000807c) r10:00000000 r9:410fc075 r8:6000406a r7:c0e0c930 r6:c0d61a44 r5:c0e07918 r4:c0e78294 We know that the clk isn't enabled with any sort of prepare_count here so we don't need to enable anything to prevent a race. And we're holding the prepare mutex so set_rate/set_parent can't race here either. Based on an earlier patch by Dong Aisheng. 
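To make steps 1 and 2 concrete, the troublesome combination looks roughly like the provider data below; the ops structures and clock names are hypothetical:

#include <linux/clk-provider.h>

extern const struct clk_ops foo_gate_ops, foo_pll_ops;  /* hypothetical */

static const char * const clk_a_parents[] = { "B" };

/* clock A: registered first, orphaned until its parent "B" shows up */
static const struct clk_init_data clk_a_init = {
        .name           = "A",
        .ops            = &foo_gate_ops,
        .parent_names   = clk_a_parents,
        .num_parents    = 1,
        .flags          = CLK_OPS_PARENT_ENABLE,
};

/* clock B: registered second, must never end up disabled */
static const struct clk_init_data clk_b_init = {
        .name           = "B",
        .ops            = &foo_pll_ops,
        .num_parents    = 0,
        .flags          = CLK_IS_CRITICAL,
};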
Fixes: fc8726a2c021 ("clk: core: support clocks which requires parents enable (part 2)") Cc: Michael Turquette Cc: Shawn Guo Reported-by: Dong Aisheng Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 647d056..999777a 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2570,14 +2570,17 @@ static int __clk_core_init(struct clk_core *core) */ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { struct clk_core *parent = __clk_init_parent(orphan); + unsigned long flags; /* * we could call __clk_set_parent, but that would result in a * redundant call to the .set_rate op, if it exists */ if (parent) { - __clk_set_parent_before(orphan, parent); - __clk_set_parent_after(orphan, parent, NULL); + /* update the clk tree topology */ + flags = clk_enable_lock(); + clk_reparent(orphan, parent); + clk_enable_unlock(flags); __clk_recalc_accuracies(orphan); __clk_recalc_rates(orphan, 0); } -- cgit v1.1 From f7ae75036762618ecc2cc7ae6fb4f6a74af92afb Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 3 Jan 2018 12:06:14 +0100 Subject: clk: Improve flags doc for of_clk_detect_critical() The "flags" parameter passed to of_clk_detect_critical() cannot be a pointer to a real clk_core.flags field, as clk_core is private to the clock framework internals. Change the comment to refer to top-level framework flags instead. Signed-off-by: Geert Uytterhoeven Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index efbc802..fe2d43e 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3912,7 +3912,7 @@ static int parent_ready(struct device_node *np) * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree * @np: Device node pointer associated with clock provider * @index: clock index - * @flags: pointer to clk_core->flags + * @flags: pointer to top-level framework flags * * Detects if the clock-critical property exists and, if so, sets the * corresponding CLK_IS_CRITICAL flag. -- cgit v1.1 From a6059ab98130fb561157682d320c51c5ccd4b647 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 3 Jan 2018 12:06:16 +0100 Subject: clk: Show symbolic clock flags in debugfs Currently the virtual "clk_flags" file in debugfs shows the numeric value of the top-level framework flags for the specified clock. Hence the user must manually interpret these values. Moreover, on big-endian 64-bit systems, the wrong half of the value is shown, due to the cast from "unsigned long *" to "u32 *". Fix both issues by showing the symbolic flag names instead. Any non-standard flags are shown as a hex number. 
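With the change applied, reading a clock's flags file should print one known flag name per line, with any leftover bits as a single hex value. For a hypothetical clock named foo_pll carrying CLK_SET_RATE_PARENT and CLK_IS_CRITICAL, and debugfs mounted in the usual place, one would expect something like:

$ cat /sys/kernel/debug/clk/foo_pll/clk_flags
CLK_SET_RATE_PARENT
CLK_IS_CRITICAL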
Signed-off-by: Geert Uytterhoeven Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 55 insertions(+), 2 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index fe2d43e..479a3ee 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "clk.h" @@ -2554,6 +2555,58 @@ static const struct file_operations clk_dump_fops = { .release = single_release, }; +static const struct { + unsigned long flag; + const char *name; +} clk_flags[] = { +#define ENTRY(f) { f, __stringify(f) } + ENTRY(CLK_SET_RATE_GATE), + ENTRY(CLK_SET_PARENT_GATE), + ENTRY(CLK_SET_RATE_PARENT), + ENTRY(CLK_IGNORE_UNUSED), + ENTRY(CLK_IS_BASIC), + ENTRY(CLK_GET_RATE_NOCACHE), + ENTRY(CLK_SET_RATE_NO_REPARENT), + ENTRY(CLK_GET_ACCURACY_NOCACHE), + ENTRY(CLK_RECALC_NEW_RATES), + ENTRY(CLK_SET_RATE_UNGATE), + ENTRY(CLK_IS_CRITICAL), + ENTRY(CLK_OPS_PARENT_ENABLE), +#undef ENTRY +}; + +static int clk_flags_dump(struct seq_file *s, void *data) +{ + struct clk_core *core = s->private; + unsigned long flags = core->flags; + unsigned int i; + + for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { + if (flags & clk_flags[i].flag) { + seq_printf(s, "%s\n", clk_flags[i].name); + flags &= ~clk_flags[i].flag; + } + } + if (flags) { + /* Unknown flags */ + seq_printf(s, "0x%lx\n", flags); + } + + return 0; +} + +static int clk_flags_open(struct inode *inode, struct file *file) +{ + return single_open(file, clk_flags_dump, inode->i_private); +} + +static const struct file_operations clk_flags_fops = { + .open = clk_flags_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int possible_parents_dump(struct seq_file *s, void *data) { struct clk_core *core = s->private; @@ -2610,8 +2663,8 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) if (!d) goto err_out; - d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry, - (u32 *)&core->flags); + d = debugfs_create_file("clk_flags", 0444, core->dentry, core, + &clk_flags_fops); if (!d) goto err_out; -- cgit v1.1 From 4c8326d5ebb0de3191e98980c80ab644026728d0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 3 Jan 2018 12:06:15 +0100 Subject: clk: Fix debugfs_create_*() usage When exposing data access through debugfs, the correct debugfs_create_*() functions must be used, matching the data types. Remove all casts from data pointers passed to debugfs_create_*() functions, as such casts prevent the compiler from flagging bugs. clk_core.rate and .accuracy are "unsigned long", hence casting their addresses to "u32 *" exposed the wrong halves on big-endian 64-bit systems. Fix this by using debugfs_create_ulong() instead. Octal permissions are preferred, as they are easier to read than symbolic permissions. Hence replace "S_IRUGO" by "0444" throughout. 
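The underlying rule is simply to pick the debugfs helper that matches the C type instead of casting the pointer. A minimal sketch in the same spirit (struct and names are invented):

#include <linux/debugfs.h>
#include <linux/types.h>

struct foo_stats {
        unsigned long   rate;
        u32             count;
};

static void foo_debugfs_init(struct foo_stats *st, struct dentry *dir)
{
        /* unsigned long: use debugfs_create_ulong(), never a (u32 *) cast */
        debugfs_create_ulong("rate", 0444, dir, &st->rate);

        /* 32-bit values keep using debugfs_create_u32() */
        debugfs_create_u32("count", 0444, dir, &st->count);
}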
Signed-off-by: Geert Uytterhoeven [sboyd@codeaurora.org: Squash the octal change in too] Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 479a3ee..c8ea2dd 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2648,18 +2648,16 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) core->dentry = d; - d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry, - (u32 *)&core->rate); + d = debugfs_create_ulong("clk_rate", 0444, core->dentry, &core->rate); if (!d) goto err_out; - d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry, - (u32 *)&core->accuracy); + d = debugfs_create_ulong("clk_accuracy", 0444, core->dentry, + &core->accuracy); if (!d) goto err_out; - d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry, - (u32 *)&core->phase); + d = debugfs_create_u32("clk_phase", 0444, core->dentry, &core->phase); if (!d) goto err_out; @@ -2668,28 +2666,28 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) if (!d) goto err_out; - d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry, - (u32 *)&core->prepare_count); + d = debugfs_create_u32("clk_prepare_count", 0444, core->dentry, + &core->prepare_count); if (!d) goto err_out; - d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry, - (u32 *)&core->enable_count); + d = debugfs_create_u32("clk_enable_count", 0444, core->dentry, + &core->enable_count); if (!d) goto err_out; - d = debugfs_create_u32("clk_protect_count", S_IRUGO, core->dentry, - (u32 *)&core->protect_count); + d = debugfs_create_u32("clk_protect_count", 0444, core->dentry, + &core->protect_count); if (!d) goto err_out; - d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, - (u32 *)&core->notifier_count); + d = debugfs_create_u32("clk_notifier_count", 0444, core->dentry, + &core->notifier_count); if (!d) goto err_out; if (core->num_parents > 1) { - d = debugfs_create_file("clk_possible_parents", S_IRUGO, + d = debugfs_create_file("clk_possible_parents", 0444, core->dentry, core, &possible_parents_fops); if (!d) goto err_out; @@ -2785,22 +2783,22 @@ static int __init clk_debug_init(void) if (!rootdir) return -ENOMEM; - d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, + d = debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, &clk_summary_fops); if (!d) return -ENOMEM; - d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, + d = debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, &clk_dump_fops); if (!d) return -ENOMEM; - d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, + d = debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, &clk_summary_fops); if (!d) return -ENOMEM; - d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, + d = debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, &clk_dump_fops); if (!d) return -ENOMEM; -- cgit v1.1 From db3188fadfe6af73d2485131a6975c032306e1ea Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 3 Jan 2018 16:44:37 -0800 Subject: clk: Simplify debugfs registration We don't need a goto here. Drop it. 
Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index c8ea2dd..0a52357 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2723,12 +2723,8 @@ static int clk_debug_register(struct clk_core *core) mutex_lock(&clk_debug_lock); hlist_add_head(&core->debug_node, &clk_debug_list); - - if (!inited) - goto unlock; - - ret = clk_debug_create_one(core, rootdir); -unlock: + if (inited) + ret = clk_debug_create_one(core, rootdir); mutex_unlock(&clk_debug_lock); return ret; -- cgit v1.1 From a12aa8a68dfef5de181f2e555aa950a0ab05411f Mon Sep 17 00:00:00 2001 From: David Lechner Date: Thu, 4 Jan 2018 19:46:08 -0600 Subject: clk: fix reentrancy of clk_enable() on UP systems Reentrant calls to clk_enable() are not working on UP systems. This is caused by the fact spin_trylock_irqsave() always returns true when CONFIG_SMP=n (and CONFIG_DEBUG_SPINLOCK=n) which causes the reference counting to not work correctly when clk_enable_lock() is called twice before clk_enable_unlock() is called (this happens when clk_enable() is called from within another clk_enable()). This fixes the problem by skipping the call to spin_trylock_irqsave() on UP systems and relying solely on reference counting. We also make sure to set flags in this case so that we are not returning an uninitialized value. Suggested-by: Stephen Boyd Signed-off-by: David Lechner Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/clk/clk.c') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 647d056..b221d80 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -141,10 +141,18 @@ static unsigned long clk_enable_lock(void) { unsigned long flags; - if (!spin_trylock_irqsave(&enable_lock, flags)) { + /* + * On UP systems, spin_trylock_irqsave() always returns true, even if + * we already hold the lock. So, in that case, we rely only on + * reference counting. + */ + if (!IS_ENABLED(CONFIG_SMP) || + !spin_trylock_irqsave(&enable_lock, flags)) { if (enable_owner == current) { enable_refcnt++; __acquire(enable_lock); + if (!IS_ENABLED(CONFIG_SMP)) + local_save_flags(flags); return flags; } spin_lock_irqsave(&enable_lock, flags); -- cgit v1.1
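For context, one way clk_enable() ends up being called from within another clk_enable() is a provider whose .enable hook has to enable a helper clock itself, so the inner call runs while enable_lock is already held by the outer one. A hypothetical provider fragment (structure and names invented, .disable hook omitted for brevity):

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/kernel.h>

struct foo_gate {
        struct clk_hw hw;
        struct clk *aux;        /* helper clock this gate depends on */
};

static int foo_gate_enable(struct clk_hw *hw)
{
        struct foo_gate *gate = container_of(hw, struct foo_gate, hw);

        /*
         * Runs with enable_lock held by the outer clk_enable(); on UP the
         * trylock used to "succeed" again here and break the refcount.
         */
        return clk_enable(gate->aux);
}

static const struct clk_ops foo_gate_ops = {
        .enable = foo_gate_enable,
};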