From 283189d3be56aa6db6f192bb255df68493cd79ac Mon Sep 17 00:00:00 2001
From: Li Fei
Date: Thu, 28 Feb 2013 15:37:11 +0800
Subject: regmap: irq: call pm_runtime_put in pm_runtime_get_sync failed case

Even in the failure case of pm_runtime_get_sync(), the usage_count is
incremented.  In order to keep the usage_count correct and let runtime
power management behave properly, call pm_runtime_put(_sync) in that
case as well.

Signed-off-by: Liu Chuansheng
Signed-off-by: Li Fei
Signed-off-by: Mark Brown
---
 drivers/base/regmap/regmap-irq.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/base')

diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4706c63..020ea2b 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
 		if (ret < 0) {
 			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
 				ret);
+			pm_runtime_put(map->dev);
 			return IRQ_NONE;
 		}
 	}
--
cgit v1.1


From b81ea1b5ac4d3c6a628158b736dd4a98c46c29d9 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Sun, 3 Mar 2013 22:48:14 +0100
Subject: PM / QoS: Fix concurrency issues and memory leaks in device PM QoS

The current device PM QoS code assumes that certain functions will never
be called in parallel with each other (for example, it is assumed that
dev_pm_qos_expose_flags() won't be called in parallel with
dev_pm_qos_hide_flags() for the same device and analogously for the
latency limit), which may be overly optimistic.  Moreover,
dev_pm_qos_expose_flags() and dev_pm_qos_expose_latency_limit() leak
memory in error code paths (req needs to be freed on errors) and
__dev_pm_qos_drop_user_request() forgets to free the request.

To fix the above issues, put more things under the device PM QoS mutex
to make them mutually exclusive and add the missing freeing of memory.

Signed-off-by: Rafael J.
Wysocki --- drivers/base/power/qos.c | 129 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 87 insertions(+), 42 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 3d4d1f8..2159d62 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -344,6 +344,13 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 curr_value; int ret = 0; + if (!req) /*guard against callers passing in null */ + return -EINVAL; + + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) + return -EINVAL; + if (!req->dev->power.qos) return -ENODEV; @@ -386,6 +393,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) { int ret; + mutex_lock(&dev_pm_qos_mtx); + ret = __dev_pm_qos_update_request(req, new_value); + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); + +static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req) +{ + int ret = 0; + if (!req) /*guard against callers passing in null */ return -EINVAL; @@ -393,13 +411,15 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) "%s() called for unknown object\n", __func__)) return -EINVAL; - mutex_lock(&dev_pm_qos_mtx); - ret = __dev_pm_qos_update_request(req, new_value); - mutex_unlock(&dev_pm_qos_mtx); - + if (req->dev->power.qos) { + ret = apply_constraint(req, PM_QOS_REMOVE_REQ, + PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } else { + ret = -ENODEV; + } return ret; } -EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); /** * dev_pm_qos_remove_request - modifies an existing qos request @@ -418,26 +438,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); */ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { - int ret = 0; - - if (!req) /*guard against callers passing in null */ - return -EINVAL; - - if (WARN(!dev_pm_qos_request_active(req), - "%s() called for unknown object\n", __func__)) - return -EINVAL; + int ret; mutex_lock(&dev_pm_qos_mtx); - - if (req->dev->power.qos) { - ret = apply_constraint(req, PM_QOS_REMOVE_REQ, - PM_QOS_DEFAULT_VALUE); - memset(req, 0, sizeof(*req)); - } else { - /* Return if the device has been removed */ - ret = -ENODEV; - } - + ret = __dev_pm_qos_remove_request(req); mutex_unlock(&dev_pm_qos_mtx); return ret; } @@ -563,16 +567,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); static void __dev_pm_qos_drop_user_request(struct device *dev, enum dev_pm_qos_req_type type) { + struct dev_pm_qos_request *req = NULL; + switch(type) { case DEV_PM_QOS_LATENCY: - dev_pm_qos_remove_request(dev->power.qos->latency_req); + req = dev->power.qos->latency_req; dev->power.qos->latency_req = NULL; break; case DEV_PM_QOS_FLAGS: - dev_pm_qos_remove_request(dev->power.qos->flags_req); + req = dev->power.qos->flags_req; dev->power.qos->flags_req = NULL; break; } + __dev_pm_qos_remove_request(req); + kfree(req); } /** @@ -588,22 +596,36 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (!device_is_registered(dev) || value < 0) return -EINVAL; - if (dev->power.qos && dev->power.qos->latency_req) - return -EEXIST; - req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); - if (ret < 0) + if (ret < 0) { + kfree(req); return ret; + } + + mutex_lock(&dev_pm_qos_mtx); + + if (!dev->power.qos) + ret = -ENODEV; + else if (dev->power.qos->latency_req) + ret = -EEXIST; + + if (ret 
< 0) { + __dev_pm_qos_remove_request(req); + kfree(req); + goto out; + } dev->power.qos->latency_req = req; ret = pm_qos_sysfs_add_latency(dev); if (ret) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + out: + mutex_unlock(&dev_pm_qos_mtx); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); @@ -614,10 +636,14 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); */ void dev_pm_qos_hide_latency_limit(struct device *dev) { + mutex_lock(&dev_pm_qos_mtx); + if (dev->power.qos && dev->power.qos->latency_req) { pm_qos_sysfs_remove_latency(dev); __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); } + + mutex_unlock(&dev_pm_qos_mtx); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); @@ -634,24 +660,37 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) if (!device_is_registered(dev)) return -EINVAL; - if (dev->power.qos && dev->power.qos->flags_req) - return -EEXIST; - req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; - pm_runtime_get_sync(dev); ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); - if (ret < 0) - goto fail; + if (ret < 0) { + kfree(req); + return ret; + } + + pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_mtx); + + if (!dev->power.qos) + ret = -ENODEV; + else if (dev->power.qos->flags_req) + ret = -EEXIST; + + if (ret < 0) { + __dev_pm_qos_remove_request(req); + kfree(req); + goto out; + } dev->power.qos->flags_req = req; ret = pm_qos_sysfs_add_flags(dev); if (ret) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); -fail: + out: + mutex_unlock(&dev_pm_qos_mtx); pm_runtime_put(dev); return ret; } @@ -663,12 +702,16 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); */ void dev_pm_qos_hide_flags(struct device *dev) { + pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_mtx); + if (dev->power.qos && dev->power.qos->flags_req) { pm_qos_sysfs_remove_flags(dev); - pm_runtime_get_sync(dev); __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); - pm_runtime_put(dev); } + + mutex_unlock(&dev_pm_qos_mtx); + pm_runtime_put(dev); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); @@ -683,12 +726,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) s32 value; int ret; - if (!dev->power.qos || !dev->power.qos->flags_req) - return -EINVAL; - pm_runtime_get_sync(dev); mutex_lock(&dev_pm_qos_mtx); + if (!dev->power.qos || !dev->power.qos->flags_req) { + ret = -EINVAL; + goto out; + } + value = dev_pm_qos_requested_flags(dev); if (set) value |= mask; @@ -697,9 +742,9 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); + out: mutex_unlock(&dev_pm_qos_mtx); pm_runtime_put(dev); - return ret; } #endif /* CONFIG_PM_RUNTIME */ -- cgit v1.1 From 37530f2bda039774bd65aea14cc1d1dd26a82b9e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 4 Mar 2013 14:22:57 +0100 Subject: PM / QoS: Remove device PM QoS sysfs attributes at the right place Device PM QoS sysfs attributes, if present during device removal, are removed from within device_pm_remove(), which is too late, since dpm_sysfs_remove() has already removed the whole attribute group they belonged to. However, moving the removal of those attributes to dpm_sysfs_remove() alone is not sufficient, because in theory they still can be re-added right after being removed by it (the device's driver is still bound to it at that point). 
For this reason, move the entire destruction of device PM QoS
constraints to dpm_sysfs_remove() and make it prevent any new
constraints from being added after it has run.

Also, move the initialization of the power.qos field in struct device
to device_pm_init_common() and drop the no longer needed
dev_pm_qos_constraints_init().

Reported-by: Sasha Levin
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/main.c  |   2 -
 drivers/base/power/power.h |   8 +--
 drivers/base/power/qos.c   | 120 ++++++++++++++++++++------------------------
 drivers/base/power/sysfs.c |   1 +
 4 files changed, 55 insertions(+), 76 deletions(-)

(limited to 'drivers/base')

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d..15beb50 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
 		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
-	dev_pm_qos_constraints_init(dev);
 	mutex_unlock(&dpm_list_mtx);
 }
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
-	dev_pm_qos_constraints_destroy(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a..cfc3226 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
 {
 	if (!dev->power.early_init) {
 		spin_lock_init(&dev->power.lock);
-		dev->power.power_state = PMSG_INVALID;
+		dev->power.qos = NULL;
 		dev->power.early_init = true;
 	}
 }
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);

 static inline void device_pm_sleep_init(struct device *dev) {}

-static inline void device_pm_add(struct device *dev)
-{
-	dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}

 static inline void device_pm_remove(struct device *dev)
 {
-	dev_pm_qos_constraints_destroy(dev);
 	pm_runtime_remove(dev);
 }
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 2159d62..5f74587e 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include

 #include "power.h"
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
 	struct pm_qos_flags *pqf;
 	s32 val;

-	if (!qos)
+	if (IS_ERR_OR_NULL(qos))
 		return PM_QOS_FLAGS_UNDEFINED;

 	pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-	return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->latency);
 }

 /**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	return 0;
 }

-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */ -void dev_pm_qos_constraints_init(struct device *dev) -{ - mutex_lock(&dev_pm_qos_mtx); - dev->power.qos = NULL; - dev->power.power_state = PMSG_ON; - mutex_unlock(&dev_pm_qos_mtx); -} +static void __dev_pm_qos_hide_latency_limit(struct device *dev); +static void __dev_pm_qos_hide_flags(struct device *dev); /** * dev_pm_qos_constraints_destroy @@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev) struct pm_qos_constraints *c; struct pm_qos_flags *f; + mutex_lock(&dev_pm_qos_mtx); + /* * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user space, they have to be hidden at this point. */ - dev_pm_qos_hide_latency_limit(dev); - dev_pm_qos_hide_flags(dev); - - mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_hide_latency_limit(dev); + __dev_pm_qos_hide_flags(dev); - dev->power.power_state = PMSG_INVALID; qos = dev->power.qos; if (!qos) goto out; @@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) } spin_lock_irq(&dev->power.lock); - dev->power.qos = NULL; + dev->power.qos = ERR_PTR(-ENODEV); spin_unlock_irq(&dev->power.lock); kfree(c->notifiers); @@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, "%s() called for already added request\n", __func__)) return -EINVAL; - req->dev = dev; - mutex_lock(&dev_pm_qos_mtx); - if (!dev->power.qos) { - if (dev->power.power_state.event == PM_EVENT_INVALID) { - /* The device has been removed from the system. */ - req->dev = NULL; - ret = -ENODEV; - goto out; - } else { - /* - * Allocate the constraints data on the first call to - * add_request, i.e. only if the data is not already - * allocated and if the device has not been removed. - */ - ret = dev_pm_qos_constraints_allocate(dev); - } - } + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); if (!ret) { + req->dev = dev; req->type = type; ret = apply_constraint(req, PM_QOS_ADD_REQ, value); } - out: mutex_unlock(&dev_pm_qos_mtx); return ret; @@ -351,7 +327,7 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, "%s() called for unknown object\n", __func__)) return -EINVAL; - if (!req->dev->power.qos) + if (IS_ERR_OR_NULL(req->dev->power.qos)) return -ENODEV; switch(req->type) { @@ -402,7 +378,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { - int ret = 0; + int ret; if (!req) /*guard against callers passing in null */ return -EINVAL; @@ -411,13 +387,11 @@ static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req) "%s() called for unknown object\n", __func__)) return -EINVAL; - if (req->dev->power.qos) { - ret = apply_constraint(req, PM_QOS_REMOVE_REQ, - PM_QOS_DEFAULT_VALUE); - memset(req, 0, sizeof(*req)); - } else { - ret = -ENODEV; - } + if (IS_ERR_OR_NULL(req->dev->power.qos)) + return -ENODEV; + + ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); return ret; } @@ -466,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) mutex_lock(&dev_pm_qos_mtx); - if (!dev->power.qos) - ret = dev->power.power_state.event != PM_EVENT_INVALID ? 
- dev_pm_qos_constraints_allocate(dev) : -ENODEV; + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); if (!ret) ret = blocking_notifier_chain_register( @@ -497,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev, mutex_lock(&dev_pm_qos_mtx); /* Silently return if the constraints object is not present. */ - if (dev->power.qos) + if (!IS_ERR_OR_NULL(dev->power.qos)) retval = blocking_notifier_chain_unregister( dev->power.qos->latency.notifiers, notifier); @@ -608,7 +583,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) mutex_lock(&dev_pm_qos_mtx); - if (!dev->power.qos) + if (IS_ERR_OR_NULL(dev->power.qos)) ret = -ENODEV; else if (dev->power.qos->latency_req) ret = -EEXIST; @@ -630,6 +605,14 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) } EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); +static void __dev_pm_qos_hide_latency_limit(struct device *dev) +{ + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { + pm_qos_sysfs_remove_latency(dev); + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + } +} + /** * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. * @dev: Device whose PM QoS latency limit is to be hidden from user space. @@ -637,12 +620,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); void dev_pm_qos_hide_latency_limit(struct device *dev) { mutex_lock(&dev_pm_qos_mtx); - - if (dev->power.qos && dev->power.qos->latency_req) { - pm_qos_sysfs_remove_latency(dev); - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); - } - + __dev_pm_qos_hide_latency_limit(dev); mutex_unlock(&dev_pm_qos_mtx); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); @@ -673,7 +651,7 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) pm_runtime_get_sync(dev); mutex_lock(&dev_pm_qos_mtx); - if (!dev->power.qos) + if (IS_ERR_OR_NULL(dev->power.qos)) ret = -ENODEV; else if (dev->power.qos->flags_req) ret = -EEXIST; @@ -696,6 +674,14 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) } EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); +static void __dev_pm_qos_hide_flags(struct device *dev) +{ + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { + pm_qos_sysfs_remove_flags(dev); + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); + } +} + /** * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. * @dev: Device whose PM QoS flags are to be hidden from user space. 
@@ -704,12 +690,7 @@ void dev_pm_qos_hide_flags(struct device *dev)
 {
 	pm_runtime_get_sync(dev);
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (dev->power.qos && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-	}
-
+	__dev_pm_qos_hide_flags(dev);
 	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
 }
@@ -729,7 +710,7 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	pm_runtime_get_sync(dev);
 	mutex_lock(&dev_pm_qos_mtx);

-	if (!dev->power.qos || !dev->power.qos->flags_req) {
+	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -747,4 +728,7 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	pm_runtime_put(dev);
 	return ret;
 }
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3..a53ebd2 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)

 void dpm_sysfs_remove(struct device *dev)
 {
+	dev_pm_qos_constraints_destroy(dev);
 	rpm_sysfs_remove(dev);
 	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 	sysfs_remove_group(&dev->kobj, &pm_attr_group);
--
cgit v1.1


From c6432ea9cc043994d5b7dcb3ad86a087777cb40c Mon Sep 17 00:00:00 2001
From: Dimitris Papastamos
Date: Mon, 11 Mar 2013 17:27:02 +0000
Subject: regmap: Initialize `map->debugfs' before regcache

In the rbtree code we are exposing statistics relating to the number of
nodes/registers of the rbtree cache for each of the devices.  Ensure
that `map->debugfs' has been initialized before we attempt to
initialize the debugfs entry for the rbtree cache.

Signed-off-by: Dimitris Papastamos
Signed-off-by: Mark Brown
Cc: stable@vger.kernel.org
---
 drivers/base/regmap/regmap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/base')

diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3d23675..50ef277 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -710,12 +710,12 @@ skip_format_initialization:
 		}
 	}

+	regmap_debugfs_init(map, config->name);
+
 	ret = regcache_init(map, config);
 	if (ret != 0)
 		goto err_range;

-	regmap_debugfs_init(map, config->name);
-
 	/* Add a devres resource for dev_get_regmap() */
 	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
 	if (!m) {
--
cgit v1.1


From 8abac3ba51b5525354e9b2ec0eed1c9e95c905d9 Mon Sep 17 00:00:00 2001
From: Lars-Peter Clausen
Date: Wed, 13 Mar 2013 16:38:33 +0100
Subject: regmap: cache: Fix regcache-rbtree sync

The last register block which falls into the specified range is not
handled correctly.  The formula which calculates the number of
registers that should be synced is inverted (and off by one).  E.g. if
all registers in that block should be synced, only one is synced, and
if only one should be synced, all (but one) are synced.

To calculate the number of registers that need to be synced we need to
subtract the number of the first register in the block from the max
register number and add one.  This patch updates the code accordingly.

The issue was introduced in commit ac8d91c ("regmap: Supply ranges to
the sync operations").
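As a worked illustration of the corrected bound (the numbers are example
values, not taken from the patch): assume a cache block with base_reg = 16
and blklen = 8, i.e. registers 16..23, and a sync limited to max = 19, which
should cover the four registers 16..19:

	/* old formula: end = rbnode->base_reg + rbnode->blklen - max */
	end = 16 + 8 - 19;	/* = 5; and if max were 23 (sync all), end = 1 */
	/* new formula: end = max - rbnode->base_reg + 1 */
	end = 19 - 16 + 1;	/* = 4: block offsets 0..3, i.e. registers 16..19 */

Here end is the exclusive upper bound of the block-relative index that the
sync loop iterates over, so the new formula syncs exactly the registers up
to and including max.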
Signed-off-by: Lars-Peter Clausen Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- drivers/base/regmap/regcache-rbtree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e6732cf..79f4fca 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, base = 0; if (max < rbnode->base_reg + rbnode->blklen) - end = rbnode->base_reg + rbnode->blklen - max; + end = max - rbnode->base_reg + 1; else end = rbnode->blklen; -- cgit v1.1 From bc8ce4afd7ee7e1421c935d24b1f879f82afdd4e Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Wed, 20 Mar 2013 17:02:02 -0600 Subject: regmap: don't corrupt work buffer in _regmap_raw_write() _regmap_raw_write() contains code to call regcache_write() to write values to the cache. That code calls memcpy() to copy the value data to the start of the work_buf. However, at least when _regmap_raw_write() is called from _regmap_bus_raw_write(), the value data is in the work_buf, and this memcpy() operation may over-write part of that value data, depending on the value of reg_bytes + pad_bytes. At least when using reg_bytes==1 and pad_bytes==0, corruption of the value data does occur. To solve this, remove the memcpy() operation, and modify the subsequent .parse_val() call to parse the original value buffer directly. At least in the case of 8-bit register address and 16-bit values, and writes of single registers at a time, this memcpy-then-parse combination used to cancel each-other out; for a work-buffer containing xx 89 03, the memcpy changed it to 89 03 03, and the parse_val changed it back to 89 89 03, thus leaving the value uncorrupted. This appears completely accidental though. Since commit 8a819ff "regmap: core: Split out in place value parsing", .parse_val only returns the parsed value, and does not modify the buffer, and hence does not (accidentally) undo the corruption caused by memcpy(). This caused bogus values to get written to HW, thus preventing e.g. audio playback on systems with a WM8903 CODEC. This patch fixes that. Signed-off-by: Stephen Warren Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 3d23675..89a9205 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -943,8 +943,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, unsigned int ival; int val_bytes = map->format.val_bytes; for (i = 0; i < val_len / val_bytes; i++) { - memcpy(map->work_buf, val + (i * val_bytes), val_bytes); - ival = map->format.parse_val(map->work_buf); + ival = map->format.parse_val(val + (i * val_bytes)); ret = regcache_write(map, reg + (i * map->reg_stride), ival); if (ret) { -- cgit v1.1 From f951b6587b94df2abb8c7a2425f7dcdb4fe647dc Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 27 Mar 2013 13:08:44 +0000 Subject: regmap: async: Add missing return Let's only write once... 
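The one-line summary refers to the async path of _regmap_raw_write(): once an
asynchronous bus write has been dispatched (and its resources freed on
failure), the function must return instead of falling through to the
synchronous write that begins at trace_regmap_hw_write_start(), otherwise the
same data is sent twice.  A condensed sketch of the fixed flow; the parts in
comments are paraphrased rather than quoted from the source:

	if (/* an async transfer was requested and the bus supports it */) {
		ret = /* dispatch the asynchronous write */;
		if (ret != 0) {
			kfree(async->work_buf);
			kfree(async);
		}

		return ret;	/* the missing return added by this patch */
	}

	/* otherwise fall through to the normal synchronous write */
	trace_regmap_hw_write_start(map->dev, reg, /* ... */);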
Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/base') diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 3d23675..dd82acf 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, kfree(async->work_buf); kfree(async); } + + return ret; } trace_regmap_hw_write_start(map->dev, reg, -- cgit v1.1 From 0f703069296664eb7c649c837cc8bb936c3ef07f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 2 Apr 2013 01:25:24 +0200 Subject: PM / QoS: Avoid possible deadlock related to sysfs access Commit b81ea1b (PM / QoS: Fix concurrency issues and memory leaks in device PM QoS) put calls to pm_qos_sysfs_add_latency(), pm_qos_sysfs_add_flags(), pm_qos_sysfs_remove_latency(), and pm_qos_sysfs_remove_flags() under dev_pm_qos_mtx, which was a mistake, because it may lead to deadlocks in some situations. For example, if pm_qos_remote_wakeup_store() is run in parallel with dev_pm_qos_constraints_destroy(), they may deadlock in the following way: ====================================================== [ INFO: possible circular locking dependency detected ] 3.9.0-rc4-next-20130328-sasha-00014-g91a3267 #319 Tainted: G W ------------------------------------------------------- trinity-child6/12371 is trying to acquire lock: (s_active#54){++++.+}, at: [] sysfs_addrm_finish+0x31/0x60 but task is already holding lock: (dev_pm_qos_mtx){+.+.+.}, at: [] dev_pm_qos_constraints_destroy+0x23/0x250 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #1 (dev_pm_qos_mtx){+.+.+.}: [] lock_acquire+0x1aa/0x240 [] __mutex_lock_common+0x59/0x5e0 [] mutex_lock_nested+0x3f/0x50 [] dev_pm_qos_update_flags+0x3f/0xc0 [] pm_qos_remote_wakeup_store+0x3f/0x70 [] dev_attr_store+0x13/0x20 [] sysfs_write_file+0xfa/0x150 [] __kernel_write+0x81/0x150 [] write_pipe_buf+0x4d/0x80 [] splice_from_pipe_feed+0x7c/0x120 [] __splice_from_pipe+0x45/0x80 [] splice_from_pipe+0x4c/0x70 [] default_file_splice_write+0x18/0x30 [] do_splice_from+0x83/0xb0 [] direct_splice_actor+0x1e/0x20 [] splice_direct_to_actor+0xe7/0x200 [] do_splice_direct+0x4c/0x70 [] do_sendfile+0x169/0x300 [] SyS_sendfile64+0x64/0xb0 [] tracesys+0xe1/0xe6 -> #0 (s_active#54){++++.+}: [] __lock_acquire+0x15bf/0x1e50 [] lock_acquire+0x1aa/0x240 [] sysfs_deactivate+0x122/0x1a0 [] sysfs_addrm_finish+0x31/0x60 [] sysfs_hash_and_remove+0x7f/0xb0 [] sysfs_unmerge_group+0x51/0x70 [] pm_qos_sysfs_remove_flags+0x14/0x20 [] __dev_pm_qos_hide_flags+0x30/0x70 [] dev_pm_qos_constraints_destroy+0x35/0x250 [] dpm_sysfs_remove+0x11/0x50 [] device_del+0x3f/0x1b0 [] device_unregister+0x48/0x60 [] usb_hub_remove_port_device+0x1c/0x20 [] hub_disconnect+0xdd/0x160 [] usb_unbind_interface+0x67/0x170 [] __device_release_driver+0x87/0xe0 [] device_release_driver+0x29/0x40 [] bus_remove_device+0x148/0x160 [] device_del+0x14f/0x1b0 [] usb_disable_device+0xf9/0x280 [] usb_set_configuration+0x268/0x840 [] usb_remove_store+0x4c/0x80 [] dev_attr_store+0x13/0x20 [] sysfs_write_file+0xfa/0x150 [] do_loop_readv_writev+0x4d/0x90 [] do_readv_writev+0xf9/0x1e0 [] vfs_writev+0x3a/0x60 [] SyS_writev+0x50/0xd0 [] tracesys+0xe1/0xe6 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(dev_pm_qos_mtx); lock(s_active#54); lock(dev_pm_qos_mtx); lock(s_active#54); *** DEADLOCK *** To avoid that, remove the calls to functions 
mentioned above from under dev_pm_qos_mtx and introduce a separate lock to prevent races between functions that add or remove device PM QoS sysfs attributes from happening. Reported-by: Sasha Levin Signed-off-by: Rafael J. Wysocki --- drivers/base/power/qos.c | 60 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 47 insertions(+), 13 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5f74587e..71671c4 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -46,6 +46,7 @@ #include "power.h" static DEFINE_MUTEX(dev_pm_qos_mtx); +static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); @@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev) struct pm_qos_constraints *c; struct pm_qos_flags *f; - mutex_lock(&dev_pm_qos_mtx); + mutex_lock(&dev_pm_qos_sysfs_mtx); /* * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user space, they have to be hidden at this point. */ + pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_flags(dev); + + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_hide_latency_limit(dev); __dev_pm_qos_hide_flags(dev); @@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev) out: mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); } /** @@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, kfree(req); } +static void dev_pm_qos_drop_user_request(struct device *dev, + enum dev_pm_qos_req_type type) +{ + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_drop_user_request(dev, type); + mutex_unlock(&dev_pm_qos_mtx); +} + /** * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. * @dev: Device whose PM QoS latency limit is to be exposed to user space. 
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) return ret; } + mutex_lock(&dev_pm_qos_sysfs_mtx); + mutex_lock(&dev_pm_qos_mtx); if (IS_ERR_OR_NULL(dev->power.qos)) @@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (ret < 0) { __dev_pm_qos_remove_request(req); kfree(req); + mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->latency_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + ret = pm_qos_sysfs_add_latency(dev); if (ret) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); out: - mutex_unlock(&dev_pm_qos_mtx); + mutex_unlock(&dev_pm_qos_sysfs_mtx); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); static void __dev_pm_qos_hide_latency_limit(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { - pm_qos_sysfs_remove_latency(dev); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); - } } /** @@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev) */ void dev_pm_qos_hide_latency_limit(struct device *dev) { + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_latency(dev); + mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_latency_limit(dev); mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); @@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) } pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + mutex_lock(&dev_pm_qos_mtx); if (IS_ERR_OR_NULL(dev->power.qos)) @@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) if (ret < 0) { __dev_pm_qos_remove_request(req); kfree(req); + mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->flags_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + ret = pm_qos_sysfs_add_flags(dev); if (ret) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); out: - mutex_unlock(&dev_pm_qos_mtx); + mutex_unlock(&dev_pm_qos_sysfs_mtx); pm_runtime_put(dev); return ret; } @@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); static void __dev_pm_qos_hide_flags(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { - pm_qos_sysfs_remove_flags(dev); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); - } } /** @@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev) void dev_pm_qos_hide_flags(struct device *dev) { pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_flags(dev); + mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_flags(dev); mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); pm_runtime_put(dev); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); -- cgit v1.1 From 51a246aa5c0a14b3d34a5c6d3c9e271c784b127f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 9 Apr 2013 18:03:25 +0100 Subject: regmap: Back out work buffer fix This reverts commit bc8ce4 (regmap: don't corrupt work buffer in _regmap_raw_write()) since it turns out that it can cause issues when taken in isolation from the other changes in -next that lead to its discovery. On the basis that nobody noticed the problems for quite some time without that subsequent work let's drop it from v3.9. 
Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index d34adef..58cfb32 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -943,7 +943,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, unsigned int ival; int val_bytes = map->format.val_bytes; for (i = 0; i < val_len / val_bytes; i++) { - ival = map->format.parse_val(val + (i * val_bytes)); + memcpy(map->work_buf, val + (i * val_bytes), val_bytes); + ival = map->format.parse_val(map->work_buf); ret = regcache_write(map, reg + (i * map->reg_stride), ival); if (ret) { -- cgit v1.1