Diffstat (limited to 'drivers/base')
-rw-r--r--   drivers/base/power/Makefile            |   3
-rw-r--r--   drivers/base/power/domain.c            |   2
-rw-r--r--   drivers/base/power/main.c              | 280
-rw-r--r--   drivers/base/power/power.h             |   4
-rw-r--r--   drivers/base/power/qos.c               | 220
-rw-r--r--   drivers/base/power/runtime.c           | 164
-rw-r--r--   drivers/base/power/sysfs.c             |  97
-rw-r--r--   drivers/base/regmap/internal.h         |   2
-rw-r--r--   drivers/base/regmap/regcache.c         |  13
-rw-r--r--   drivers/base/regmap/regmap-debugfs.c   |   2
-rw-r--r--   drivers/base/regmap/regmap-irq.c       |   6
-rw-r--r--   drivers/base/regmap/regmap-mmio.c      |  56
-rw-r--r--   drivers/base/regmap/regmap.c           | 353
13 files changed, 953 insertions(+), 249 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2e58ebb..1cb8544 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,6 +1,5 @@ -obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o +obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o -obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index bfb8955..dc127e5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -42,7 +42,7 @@ struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ if (!__retval && __elapsed > __td->field) { \ __td->field = __elapsed; \ - dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ + dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ __elapsed); \ genpd->max_off_time_changed = true; \ __td->constraint_changed = true; \ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 1b41fca..86d5e4f 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -29,6 +29,7 @@ #include <linux/async.h> #include <linux/suspend.h> #include <trace/events/power.h> +#include <linux/cpufreq.h> #include <linux/cpuidle.h> #include <linux/timer.h> @@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev) { dev->power.is_prepared = false; dev->power.is_suspended = false; + dev->power.is_noirq_suspended = false; + dev->power.is_late_suspended = false; init_completion(&dev->power.completion); complete_all(&dev->power.completion); dev->power.wakeup = NULL; @@ -467,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_resume_noirq(struct device *dev, pm_message_t state) +static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; @@ -479,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) if (dev->power.syscore) goto Out; + if (!dev->power.is_noirq_suspended) + goto Out; + + dpm_wait(dev->parent, async); + if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -499,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) } error = dpm_run_callback(callback, dev, state, info); + dev->power.is_noirq_suspended = false; Out: + complete_all(&dev->power.completion); TRACE_RESUME(error); return error; } +static bool is_async(struct device *dev) +{ + return dev->power.async_suspend && pm_async_enabled + && !pm_trace_is_enabled(); +} + +static void async_resume_noirq(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume_noirq(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + + put_device(dev); +} + /** * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. * @state: PM transition of the system being carried out. 
@@ -514,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) */ static void dpm_resume_noirq(pm_message_t state) { + struct device *dev; ktime_t starttime = ktime_get(); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_noirq_list)) { - struct device *dev = to_device(dpm_noirq_list.next); - int error; + pm_transition = state; + /* + * Advanced the async threads upfront, + * in case the starting of async threads is + * delayed by non-async resuming devices. + */ + list_for_each_entry(dev, &dpm_noirq_list, power.entry) { + reinit_completion(&dev->power.completion); + if (is_async(dev)) { + get_device(dev); + async_schedule(async_resume_noirq, dev); + } + } + + while (!list_empty(&dpm_noirq_list)) { + dev = to_device(dpm_noirq_list.next); get_device(dev); list_move_tail(&dev->power.entry, &dpm_late_early_list); mutex_unlock(&dpm_list_mtx); - error = device_resume_noirq(dev, state); - if (error) { - suspend_stats.failed_resume_noirq++; - dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " noirq", error); + if (!is_async(dev)) { + int error; + + error = device_resume_noirq(dev, state, false); + if (error) { + suspend_stats.failed_resume_noirq++; + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " noirq", error); + } } mutex_lock(&dpm_list_mtx); put_device(dev); } mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); dpm_show_time(starttime, state, "noirq"); resume_device_irqs(); cpuidle_resume(); @@ -549,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state) * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_resume_early(struct device *dev, pm_message_t state) +static int device_resume_early(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; @@ -561,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state) if (dev->power.syscore) goto Out; + if (!dev->power.is_late_suspended) + goto Out; + + dpm_wait(dev->parent, async); + if (dev->pm_domain) { info = "early power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -581,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state) } error = dpm_run_callback(callback, dev, state, info); + dev->power.is_late_suspended = false; Out: TRACE_RESUME(error); pm_runtime_enable(dev); + complete_all(&dev->power.completion); return error; } +static void async_resume_early(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume_early(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + + put_device(dev); +} + /** * dpm_resume_early - Execute "early resume" callbacks for all devices. * @state: PM transition of the system being carried out. */ static void dpm_resume_early(pm_message_t state) { + struct device *dev; ktime_t starttime = ktime_get(); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_late_early_list)) { - struct device *dev = to_device(dpm_late_early_list.next); - int error; + pm_transition = state; + /* + * Advanced the async threads upfront, + * in case the starting of async threads is + * delayed by non-async resuming devices. 
+ */ + list_for_each_entry(dev, &dpm_late_early_list, power.entry) { + reinit_completion(&dev->power.completion); + if (is_async(dev)) { + get_device(dev); + async_schedule(async_resume_early, dev); + } + } + + while (!list_empty(&dpm_late_early_list)) { + dev = to_device(dpm_late_early_list.next); get_device(dev); list_move_tail(&dev->power.entry, &dpm_suspended_list); mutex_unlock(&dpm_list_mtx); - error = device_resume_early(dev, state); - if (error) { - suspend_stats.failed_resume_early++; - dpm_save_failed_step(SUSPEND_RESUME_EARLY); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " early", error); - } + if (!is_async(dev)) { + int error; + error = device_resume_early(dev, state, false); + if (error) { + suspend_stats.failed_resume_early++; + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " early", error); + } + } mutex_lock(&dpm_list_mtx); put_device(dev); } mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); dpm_show_time(starttime, state, "early"); } @@ -732,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie) put_device(dev); } -static bool is_async(struct device *dev) -{ - return dev->power.async_suspend && pm_async_enabled - && !pm_trace_is_enabled(); -} - /** * dpm_resume - Execute "resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. @@ -789,6 +867,8 @@ void dpm_resume(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, NULL); + + cpufreq_resume(); } /** @@ -913,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_suspend_noirq(struct device *dev, pm_message_t state) +static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; + int error = 0; + + if (async_error) + goto Complete; + + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } if (dev->power.syscore) - return 0; + goto Complete; + + dpm_wait_for_children(dev, async); if (dev->pm_domain) { info = "noirq power domain "; @@ -940,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) callback = pm_noirq_op(dev->driver->pm, state); } - return dpm_run_callback(callback, dev, state, info); + error = dpm_run_callback(callback, dev, state, info); + if (!error) + dev->power.is_noirq_suspended = true; + else + async_error = error; + +Complete: + complete_all(&dev->power.completion); + return error; +} + +static void async_suspend_noirq(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend_noirq(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + + put_device(dev); +} + +static int device_suspend_noirq(struct device *dev) +{ + reinit_completion(&dev->power.completion); + + if (pm_async_enabled && dev->power.async_suspend) { + get_device(dev); + async_schedule(async_suspend_noirq, dev); + return 0; + } + return __device_suspend_noirq(dev, pm_transition, false); } /** @@ -958,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state) cpuidle_pause(); suspend_device_irqs(); mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + while (!list_empty(&dpm_late_early_list)) { struct device *dev = 
to_device(dpm_late_early_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); - error = device_suspend_noirq(dev, state); + error = device_suspend_noirq(dev); mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, " noirq", error); - suspend_stats.failed_suspend_noirq++; - dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; @@ -979,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state) list_move(&dev->power.entry, &dpm_noirq_list); put_device(dev); - if (pm_wakeup_pending()) { - error = -EBUSY; + if (async_error) break; - } } mutex_unlock(&dpm_list_mtx); - if (error) + async_synchronize_full(); + if (!error) + error = async_error; + + if (error) { + suspend_stats.failed_suspend_noirq++; + dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); dpm_resume_noirq(resume_event(state)); - else + } else { dpm_show_time(starttime, state, "noirq"); + } return error; } @@ -999,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state) * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_suspend_late(struct device *dev, pm_message_t state) +static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; + int error = 0; __pm_runtime_disable(dev, false); + if (async_error) + goto Complete; + + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + if (dev->power.syscore) - return 0; + goto Complete; + + dpm_wait_for_children(dev, async); if (dev->pm_domain) { info = "late power domain "; @@ -1028,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state) callback = pm_late_early_op(dev->driver->pm, state); } - return dpm_run_callback(callback, dev, state, info); + error = dpm_run_callback(callback, dev, state, info); + if (!error) + dev->power.is_late_suspended = true; + else + async_error = error; + +Complete: + complete_all(&dev->power.completion); + return error; +} + +static void async_suspend_late(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend_late(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + put_device(dev); +} + +static int device_suspend_late(struct device *dev) +{ + reinit_completion(&dev->power.completion); + + if (pm_async_enabled && dev->power.async_suspend) { + get_device(dev); + async_schedule(async_suspend_late, dev); + return 0; + } + + return __device_suspend_late(dev, pm_transition, false); } /** @@ -1041,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state) int error = 0; mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + while (!list_empty(&dpm_suspended_list)) { struct device *dev = to_device(dpm_suspended_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); - error = device_suspend_late(dev, state); + error = device_suspend_late(dev); mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, " late", error); - suspend_stats.failed_suspend_late++; - dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; @@ -1062,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state) list_move(&dev->power.entry, &dpm_late_early_list); put_device(dev); - if (pm_wakeup_pending()) { - error = -EBUSY; + if (async_error) break; - } } mutex_unlock(&dpm_list_mtx); - if (error) + async_synchronize_full(); + if 
(error) { + suspend_stats.failed_suspend_late++; + dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); - else + } else { dpm_show_time(starttime, state, "late"); - + } return error; } @@ -1259,6 +1437,8 @@ int dpm_suspend(pm_message_t state) might_sleep(); + cpufreq_suspend(); + mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index cfc3226..a21223d 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev); extern void rpm_sysfs_remove(struct device *dev); extern int wakeup_sysfs_add(struct device *dev); extern void wakeup_sysfs_remove(struct device *dev); -extern int pm_qos_sysfs_add_latency(struct device *dev); -extern void pm_qos_sysfs_remove_latency(struct device *dev); +extern int pm_qos_sysfs_add_resume_latency(struct device *dev); +extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5c1361a..36b9eb4 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags); s32 __dev_pm_qos_read_value(struct device *dev) { return IS_ERR_OR_NULL(dev->power.qos) ? - 0 : pm_qos_read_value(&dev->power.qos->latency); + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); } /** @@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req, int ret; switch(req->type) { - case DEV_PM_QOS_LATENCY: - ret = pm_qos_update_target(&qos->latency, &req->data.pnode, - action, value); + case DEV_PM_QOS_RESUME_LATENCY: + ret = pm_qos_update_target(&qos->resume_latency, + &req->data.pnode, action, value); if (ret) { - value = pm_qos_read_value(&qos->latency); + value = pm_qos_read_value(&qos->resume_latency); blocking_notifier_call_chain(&dev_pm_notifiers, (unsigned long)value, req); } break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + ret = pm_qos_update_target(&qos->latency_tolerance, + &req->data.pnode, action, value); + if (ret) { + value = pm_qos_read_value(&qos->latency_tolerance); + req->dev->power.set_latency_tolerance(req->dev, value); + } + break; case DEV_PM_QOS_FLAGS: ret = pm_qos_update_flags(&qos->flags, &req->data.flr, action, value); @@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) } BLOCKING_INIT_NOTIFIER_HEAD(n); - c = &qos->latency; + c = &qos->resume_latency; plist_head_init(&c->list); - c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; - c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->type = PM_QOS_MIN; c->notifiers = n; + c = &qos->latency_tolerance; + plist_head_init(&c->list); + c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + c->type = PM_QOS_MIN; + INIT_LIST_HEAD(&qos->flags.list); spin_lock_irq(&dev->power.lock); @@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user space, they have to be hidden at this point. 
*/ - pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_resume_latency(dev); pm_qos_sysfs_remove_flags(dev); mutex_lock(&dev_pm_qos_mtx); @@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) goto out; /* Flush the constraints lists for the device. */ - c = &qos->latency; + c = &qos->resume_latency; plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { /* * Update constraints list and call the notification @@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); memset(req, 0, sizeof(*req)); } + c = &qos->latency_tolerance; + plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } f = &qos->flags; list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); @@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev) mutex_unlock(&dev_pm_qos_sysfs_mtx); } +static bool dev_pm_qos_invalid_request(struct device *dev, + struct dev_pm_qos_request *req) +{ + return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE + && !dev->power.set_latency_tolerance); +} + +static int __dev_pm_qos_add_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) +{ + int ret = 0; + + if (!dev || dev_pm_qos_invalid_request(dev, req)) + return -EINVAL; + + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) + return -EINVAL; + + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); + + trace_dev_pm_qos_add_request(dev_name(dev), type, value); + if (!ret) { + req->dev = dev; + req->type = type; + ret = apply_constraint(req, PM_QOS_ADD_REQ, value); + } + return ret; +} + /** * dev_pm_qos_add_request - inserts new qos request into the list * @dev: target device for the constraint @@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { - int ret = 0; - - if (!dev || !req) /*guard against callers passing in null */ - return -EINVAL; - - if (WARN(dev_pm_qos_request_active(req), - "%s() called for already added request\n", __func__)) - return -EINVAL; + int ret; mutex_lock(&dev_pm_qos_mtx); - - if (IS_ERR(dev->power.qos)) - ret = -ENODEV; - else if (!dev->power.qos) - ret = dev_pm_qos_constraints_allocate(dev); - - trace_dev_pm_qos_add_request(dev_name(dev), type, value); - if (!ret) { - req->dev = dev; - req->type = type; - ret = apply_constraint(req, PM_QOS_ADD_REQ, value); - } - + ret = __dev_pm_qos_add_request(dev, req, type, value); mutex_unlock(&dev_pm_qos_mtx); - return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); @@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, return -ENODEV; switch(req->type) { - case DEV_PM_QOS_LATENCY: + case DEV_PM_QOS_RESUME_LATENCY: + case DEV_PM_QOS_LATENCY_TOLERANCE: curr_value = req->data.pnode.prio; break; case DEV_PM_QOS_FLAGS: @@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) ret = dev_pm_qos_constraints_allocate(dev); if (!ret) - ret = blocking_notifier_chain_register( - dev->power.qos->latency.notifiers, notifier); + ret = 
blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return ret; @@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev, /* Silently return if the constraints object is not present. */ if (!IS_ERR_OR_NULL(dev->power.qos)) - retval = blocking_notifier_chain_unregister( - dev->power.qos->latency.notifiers, - notifier); + retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return retval; @@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. * @dev: Device whose ancestor to add the request for. * @req: Pointer to the preallocated handle. + * @type: Type of the request. * @value: Constraint latency value. */ int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value) + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) { struct device *ancestor = dev->parent; int ret = -ENODEV; - while (ancestor && !ancestor->power.ignore_children) - ancestor = ancestor->parent; + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + while (ancestor && !ancestor->power.set_latency_tolerance) + ancestor = ancestor->parent; + + break; + default: + ancestor = NULL; + } if (ancestor) - ret = dev_pm_qos_add_request(ancestor, req, - DEV_PM_QOS_LATENCY, value); + ret = dev_pm_qos_add_request(ancestor, req, type, value); if (ret < 0) req->dev = NULL; @@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, struct dev_pm_qos_request *req = NULL; switch(type) { - case DEV_PM_QOS_LATENCY: - req = dev->power.qos->latency_req; - dev->power.qos->latency_req = NULL; + case DEV_PM_QOS_RESUME_LATENCY: + req = dev->power.qos->resume_latency_req; + dev->power.qos->resume_latency_req = NULL; + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + req = dev->power.qos->latency_tolerance_req; + dev->power.qos->latency_tolerance_req = NULL; break; case DEV_PM_QOS_FLAGS: req = dev->power.qos->flags_req; @@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (!req) return -ENOMEM; - ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value); if (ret < 0) { kfree(req); return ret; @@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (IS_ERR_OR_NULL(dev->power.qos)) ret = -ENODEV; - else if (dev->power.qos->latency_req) + else if (dev->power.qos->resume_latency_req) ret = -EEXIST; if (ret < 0) { @@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->latency_req = req; + dev->power.qos->resume_latency_req = req; mutex_unlock(&dev_pm_qos_mtx); - ret = pm_qos_sysfs_add_latency(dev); + ret = pm_qos_sysfs_add_resume_latency(dev); if (ret) - dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); out: mutex_unlock(&dev_pm_qos_sysfs_mtx); @@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); static void __dev_pm_qos_hide_latency_limit(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) - 
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); } /** @@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev) { mutex_lock(&dev_pm_qos_sysfs_mtx); - pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_resume_latency(dev); mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_latency_limit(dev); @@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) pm_runtime_put(dev); return ret; } + +/** + * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance. + * @dev: Device to obtain the user space latency tolerance for. + */ +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) +{ + s32 ret; + + mutex_lock(&dev_pm_qos_mtx); + ret = IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req ? + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT : + dev->power.qos->latency_tolerance_req->data.pnode.prio; + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} + +/** + * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance. + * @dev: Device to update the user space latency tolerance for. + * @val: New user space latency tolerance for @dev (negative values disable). + */ +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) +{ + int ret; + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req) { + struct dev_pm_qos_request *req; + + if (val < 0) { + ret = -EINVAL; + goto out; + } + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto out; + } + ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); + if (ret < 0) { + kfree(req); + goto out; + } + dev->power.qos->latency_tolerance_req = req; + } else { + if (val < 0) { + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE); + ret = 0; + } else { + ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val); + } + } + + out: + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} #else /* !CONFIG_PM_RUNTIME */ static void __dev_pm_qos_hide_latency_limit(struct device *dev) {} static void __dev_pm_qos_hide_flags(struct device *dev) {} diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 72e00e6..67c7938 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -13,6 +13,43 @@ #include <trace/events/rpm.h> #include "power.h" +#define RPM_GET_CALLBACK(dev, cb) \ +({ \ + int (*__rpm_cb)(struct device *__d); \ + \ + if (dev->pm_domain) \ + __rpm_cb = dev->pm_domain->ops.cb; \ + else if (dev->type && dev->type->pm) \ + __rpm_cb = dev->type->pm->cb; \ + else if (dev->class && dev->class->pm) \ + __rpm_cb = dev->class->pm->cb; \ + else if (dev->bus && dev->bus->pm) \ + __rpm_cb = dev->bus->pm->cb; \ + else \ + __rpm_cb = NULL; \ + \ + if (!__rpm_cb && dev->driver && dev->driver->pm) \ + __rpm_cb = dev->driver->pm->cb; \ + \ + __rpm_cb; \ +}) + +static int (*rpm_get_suspend_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_suspend); +} + +static int (*rpm_get_resume_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_resume); +} + +#ifdef CONFIG_PM_RUNTIME +static int (*rpm_get_idle_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_idle); +} + static int rpm_resume(struct device *dev, int rpmflags); static int 
rpm_suspend(struct device *dev, int rpmflags); @@ -310,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_idle; - else if (dev->type && dev->type->pm) - callback = dev->type->pm->runtime_idle; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_idle; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_idle; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_idle; + callback = rpm_get_idle_cb(dev); if (callback) retval = __rpm_callback(callback, dev); @@ -492,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_SUSPENDING); - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_suspend; - else if (dev->type && dev->type->pm) - callback = dev->type->pm->runtime_suspend; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_suspend; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_suspend; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_suspend; + callback = rpm_get_suspend_cb(dev); retval = rpm_callback(callback, dev); if (retval) @@ -724,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_RESUMING); - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_resume; - else if (dev->type && dev->type->pm) - callback = dev->type->pm->runtime_resume; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_resume; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_resume; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_resume; + callback = rpm_get_resume_cb(dev); retval = rpm_callback(callback, dev); if (retval) { @@ -1130,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier); * @dev: Device to handle. * @check_resume: If set, check if there's a resume request for the device. * - * Increment power.disable_depth for the device and if was zero previously, + * Increment power.disable_depth for the device and if it was zero previously, * cancel all pending runtime PM requests for the device and wait for all * operations in progress to complete. The device can be either active or * suspended after its runtime PM has been disabled. @@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev) if (dev->power.irq_safe && dev->parent) pm_runtime_put(dev->parent); } +#endif + +/** + * pm_runtime_force_suspend - Force a device into suspend state if needed. + * @dev: Device to suspend. + * + * Disable runtime PM so we safely can check the device's runtime PM status and + * if it is active, invoke it's .runtime_suspend callback to bring it into + * suspend state. Keep runtime PM disabled to preserve the state unless we + * encounter errors. + * + * Typically this function may be invoked from a system suspend callback to make + * sure the device is put into low power state. + */ +int pm_runtime_force_suspend(struct device *dev) +{ + int (*callback)(struct device *); + int ret = 0; + + pm_runtime_disable(dev); + + /* + * Note that pm_runtime_status_suspended() returns false while + * !CONFIG_PM_RUNTIME, which means the device will be put into low + * power state. 
+ */ + if (pm_runtime_status_suspended(dev)) + return 0; + + callback = rpm_get_suspend_cb(dev); + + if (!callback) { + ret = -ENOSYS; + goto err; + } + + ret = callback(dev); + if (ret) + goto err; + + pm_runtime_set_suspended(dev); + return 0; +err: + pm_runtime_enable(dev); + return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); + +/** + * pm_runtime_force_resume - Force a device into resume state. + * @dev: Device to resume. + * + * Prior invoking this function we expect the user to have brought the device + * into low power state by a call to pm_runtime_force_suspend(). Here we reverse + * those actions and brings the device into full power. We update the runtime PM + * status and re-enables runtime PM. + * + * Typically this function may be invoked from a system resume callback to make + * sure the device is put into full power state. + */ +int pm_runtime_force_resume(struct device *dev) +{ + int (*callback)(struct device *); + int ret = 0; + + callback = rpm_get_resume_cb(dev); + + if (!callback) { + ret = -ENOSYS; + goto out; + } + + ret = callback(dev); + if (ret) + goto out; + + pm_runtime_set_active(dev); + pm_runtime_mark_last_busy(dev); +out: + pm_runtime_enable(dev); + return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_resume); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 03e089a..95b181d1 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, autosuspend_delay_ms_store); -static ssize_t pm_qos_latency_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t pm_qos_resume_latency_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); + return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev)); } -static ssize_t pm_qos_latency_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t n) +static ssize_t pm_qos_resume_latency_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) { s32 value; int ret; @@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev, if (value < 0) return -EINVAL; - ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); + ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, + value); return ret < 0 ? ret : n; } static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, - pm_qos_latency_show, pm_qos_latency_store); + pm_qos_resume_latency_show, pm_qos_resume_latency_store); + +static ssize_t pm_qos_latency_tolerance_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s32 value = dev_pm_qos_get_user_latency_tolerance(dev); + + if (value < 0) + return sprintf(buf, "auto\n"); + else if (value == PM_QOS_LATENCY_ANY) + return sprintf(buf, "any\n"); + + return sprintf(buf, "%d\n", value); +} + +static ssize_t pm_qos_latency_tolerance_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) +{ + s32 value; + int ret; + + if (kstrtos32(buf, 0, &value)) { + if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) + value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) + value = PM_QOS_LATENCY_ANY; + } + ret = dev_pm_qos_update_user_latency_tolerance(dev, value); + return ret < 0 ? 
ret : n; +} + +static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644, + pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store); static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, @@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = { .attrs = runtime_attrs, }; -static struct attribute *pm_qos_latency_attrs[] = { +static struct attribute *pm_qos_resume_latency_attrs[] = { #ifdef CONFIG_PM_RUNTIME &dev_attr_pm_qos_resume_latency_us.attr, #endif /* CONFIG_PM_RUNTIME */ NULL, }; -static struct attribute_group pm_qos_latency_attr_group = { +static struct attribute_group pm_qos_resume_latency_attr_group = { + .name = power_group_name, + .attrs = pm_qos_resume_latency_attrs, +}; + +static struct attribute *pm_qos_latency_tolerance_attrs[] = { +#ifdef CONFIG_PM_RUNTIME + &dev_attr_pm_qos_latency_tolerance_us.attr, +#endif /* CONFIG_PM_RUNTIME */ + NULL, +}; +static struct attribute_group pm_qos_latency_tolerance_attr_group = { .name = power_group_name, - .attrs = pm_qos_latency_attrs, + .attrs = pm_qos_latency_tolerance_attrs, }; static struct attribute *pm_qos_flags_attrs[] = { @@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev) if (rc) goto err_out; } - if (device_can_wakeup(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); - if (rc) { - if (pm_runtime_callbacks_present(dev)) - sysfs_unmerge_group(&dev->kobj, - &pm_runtime_attr_group); - goto err_out; - } + if (rc) + goto err_runtime; + } + if (dev->power.set_latency_tolerance) { + rc = sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); + if (rc) + goto err_wakeup; } return 0; + err_wakeup: + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + err_runtime: + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); err_out: sysfs_remove_group(&dev->kobj, &pm_attr_group); return rc; @@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); } -int pm_qos_sysfs_add_latency(struct device *dev) +int pm_qos_sysfs_add_resume_latency(struct device *dev) { - return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); + return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } -void pm_qos_sysfs_remove_latency(struct device *dev) +void pm_qos_sysfs_remove_resume_latency(struct device *dev) { - sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); + sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } int pm_qos_sysfs_add_flags(struct device *dev) @@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev) { + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 33414b1..7d13269 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -134,6 +134,8 @@ struct regmap { /* if set, converts bulk rw to single rw */ bool use_single_rw; + /* if set, the device supports multi write mode */ + bool can_multi_write; struct rb_root range_tree; void *selector_work_buf; /* Scratch buffer used for selector */ diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index d4dd771..29b4128 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -249,11 +249,12 @@ static int regcache_default_sync(struct regmap *map, 
unsigned int min, { unsigned int reg; - for (reg = min; reg <= max; reg++) { + for (reg = min; reg <= max; reg += map->reg_stride) { unsigned int val; int ret; - if (regmap_volatile(map, reg)) + if (regmap_volatile(map, reg) || + !regmap_writeable(map, reg)) continue; ret = regcache_read(map, reg, &val); @@ -312,10 +313,6 @@ int regcache_sync(struct regmap *map) /* Apply any patch first */ map->cache_bypass = 1; for (i = 0; i < map->patch_regs; i++) { - if (map->patch[i].reg % map->reg_stride) { - ret = -EINVAL; - goto out; - } ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); if (ret != 0) { dev_err(map->dev, "Failed to write %x = %x: %d\n", @@ -636,10 +633,10 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, if (*data == NULL) return 0; - count = cur - base; + count = (cur - base) / map->reg_stride; dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", - count * val_bytes, count, base, cur - 1); + count * val_bytes, count, base, cur - map->reg_stride); map->cache_bypass = 1; diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index c5471cd..45d812c 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -511,7 +511,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name) debugfs_create_file("range", 0400, map->debugfs, map, ®map_reg_ranges_fops); - if (map->max_register) { + if (map->max_register || regmap_readable(map, 0)) { debugfs_create_file("registers", 0400, map->debugfs, map, ®map_map_fops); debugfs_create_file("access", 0400, map->debugfs, diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 8269206..edf88f2 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -368,8 +368,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d) return -ENOMEM; - *data = d; - d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, GFP_KERNEL); if (!d->status_buf) @@ -506,6 +504,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, goto err_domain; } + *data = d; + return 0; err_domain: @@ -533,7 +533,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) return; free_irq(irq, d); - /* We should unmap the domain but... 
*/ + irq_domain_remove(d->domain); kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 81f9775..de45a1e 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -26,10 +26,47 @@ struct regmap_mmio_context { void __iomem *regs; + unsigned reg_bytes; unsigned val_bytes; + unsigned pad_bytes; struct clk *clk; }; +static inline void regmap_mmio_regsize_check(size_t reg_size) +{ + switch (reg_size) { + case 1: + case 2: + case 4: +#ifdef CONFIG_64BIT + case 8: +#endif + break; + default: + BUG(); + } +} + +static int regmap_mmio_regbits_check(size_t reg_bits) +{ + switch (reg_bits) { + case 8: + case 16: + case 32: +#ifdef CONFIG_64BIT + case 64: +#endif + return 0; + default: + return -EINVAL; + } +} + +static inline void regmap_mmio_count_check(size_t count) +{ + BUG_ON(count % 2 != 0); +} + static int regmap_mmio_gather_write(void *context, const void *reg, size_t reg_size, const void *val, size_t val_size) @@ -38,7 +75,7 @@ static int regmap_mmio_gather_write(void *context, u32 offset; int ret; - BUG_ON(reg_size != 4); + regmap_mmio_regsize_check(reg_size); if (!IS_ERR(ctx->clk)) { ret = clk_enable(ctx->clk); @@ -81,9 +118,13 @@ static int regmap_mmio_gather_write(void *context, static int regmap_mmio_write(void *context, const void *data, size_t count) { - BUG_ON(count < 4); + struct regmap_mmio_context *ctx = context; + u32 offset = ctx->reg_bytes + ctx->pad_bytes; + + regmap_mmio_count_check(count); - return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4); + return regmap_mmio_gather_write(context, data, ctx->reg_bytes, + data + offset, count - offset); } static int regmap_mmio_read(void *context, @@ -94,7 +135,7 @@ static int regmap_mmio_read(void *context, u32 offset; int ret; - BUG_ON(reg_size != 4); + regmap_mmio_regsize_check(reg_size); if (!IS_ERR(ctx->clk)) { ret = clk_enable(ctx->clk); @@ -165,8 +206,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, int min_stride; int ret; - if (config->reg_bits != 32) - return ERR_PTR(-EINVAL); + ret = regmap_mmio_regbits_check(config->reg_bits); + if (ret) + return ERR_PTR(ret); if (config->pad_bits) return ERR_PTR(-EINVAL); @@ -209,6 +251,8 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, ctx->regs = regs; ctx->val_bytes = config->val_bits / 8; + ctx->reg_bytes = config->reg_bits / 8; + ctx->pad_bytes = config->pad_bits / 8; ctx->clk = ERR_PTR(-ENODEV); if (clk_id == NULL) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 6a19515..d0a0724 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -380,6 +380,28 @@ static void regmap_range_exit(struct regmap *map) kfree(map->selector_work_buf); } +int regmap_attach_dev(struct device *dev, struct regmap *map, + const struct regmap_config *config) +{ + struct regmap **m; + + map->dev = dev; + + regmap_debugfs_init(map, config->name); + + /* Add a devres resource for dev_get_regmap() */ + m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); + if (!m) { + regmap_debugfs_exit(map); + return -ENOMEM; + } + *m = map; + devres_add(dev, m); + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_attach_dev); + /** * regmap_init(): Initialise register map * @@ -397,7 +419,7 @@ struct regmap *regmap_init(struct device *dev, void *bus_context, const struct regmap_config *config) { - struct regmap *map, **m; + struct regmap *map; int ret = 
-EINVAL; enum regmap_endian reg_endian, val_endian; int i, j; @@ -439,6 +461,7 @@ struct regmap *regmap_init(struct device *dev, else map->reg_stride = 1; map->use_single_rw = config->use_single_rw; + map->can_multi_write = config->can_multi_write; map->dev = dev; map->bus = bus; map->bus_context = bus_context; @@ -718,7 +741,7 @@ skip_format_initialization: new->window_start = range_cfg->window_start; new->window_len = range_cfg->window_len; - if (_regmap_range_add(map, new) == false) { + if (!_regmap_range_add(map, new)) { dev_err(map->dev, "Failed to add range %d\n", i); kfree(new); goto err_range; @@ -734,25 +757,18 @@ skip_format_initialization: } } - regmap_debugfs_init(map, config->name); - ret = regcache_init(map, config); if (ret != 0) goto err_range; - /* Add a devres resource for dev_get_regmap() */ - m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); - if (!m) { - ret = -ENOMEM; - goto err_debugfs; - } - *m = map; - devres_add(dev, m); + if (dev) + ret = regmap_attach_dev(dev, map, config); + if (ret != 0) + goto err_regcache; return map; -err_debugfs: - regmap_debugfs_exit(map); +err_regcache: regcache_exit(map); err_range: regmap_range_exit(map); @@ -1520,12 +1536,12 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, if (reg % map->reg_stride) return -EINVAL; - map->lock(map->lock_arg); /* * Some devices don't support bulk write, for * them we have a series of single write operations. */ if (!map->bus || map->use_single_rw) { + map->lock(map->lock_arg); for (i = 0; i < val_count; i++) { unsigned int ival; @@ -1554,31 +1570,239 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, if (ret != 0) goto out; } +out: + map->unlock(map->lock_arg); } else { void *wval; wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); if (!wval) { - ret = -ENOMEM; dev_err(map->dev, "Error in memory allocation\n"); - goto out; + return -ENOMEM; } for (i = 0; i < val_count * val_bytes; i += val_bytes) map->format.parse_inplace(wval + i); + map->lock(map->lock_arg); ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); + map->unlock(map->lock_arg); kfree(wval); } -out: - map->unlock(map->lock_arg); return ret; } EXPORT_SYMBOL_GPL(regmap_bulk_write); /* + * _regmap_raw_multi_reg_write() + * + * the (register,newvalue) pairs in regs have not been formatted, but + * they are all in the same page and have been changed to being page + * relative. The page register has been written if that was neccessary. + */ +static int _regmap_raw_multi_reg_write(struct regmap *map, + const struct reg_default *regs, + size_t num_regs) +{ + int ret; + void *buf; + int i; + u8 *u8; + size_t val_bytes = map->format.val_bytes; + size_t reg_bytes = map->format.reg_bytes; + size_t pad_bytes = map->format.pad_bytes; + size_t pair_size = reg_bytes + pad_bytes + val_bytes; + size_t len = pair_size * num_regs; + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* We have to linearise by hand. 
*/ + + u8 = buf; + + for (i = 0; i < num_regs; i++) { + int reg = regs[i].reg; + int val = regs[i].def; + trace_regmap_hw_write_start(map->dev, reg, 1); + map->format.format_reg(u8, reg, map->reg_shift); + u8 += reg_bytes + pad_bytes; + map->format.format_val(u8, val, 0); + u8 += val_bytes; + } + u8 = buf; + *u8 |= map->write_flag_mask; + + ret = map->bus->write(map->bus_context, buf, len); + + kfree(buf); + + for (i = 0; i < num_regs; i++) { + int reg = regs[i].reg; + trace_regmap_hw_write_done(map->dev, reg, 1); + } + return ret; +} + +static unsigned int _regmap_register_page(struct regmap *map, + unsigned int reg, + struct regmap_range_node *range) +{ + unsigned int win_page = (reg - range->range_min) / range->window_len; + + return win_page; +} + +static int _regmap_range_multi_paged_reg_write(struct regmap *map, + struct reg_default *regs, + size_t num_regs) +{ + int ret; + int i, n; + struct reg_default *base; + unsigned int this_page; + /* + * the set of registers are not neccessarily in order, but + * since the order of write must be preserved this algorithm + * chops the set each time the page changes + */ + base = regs; + for (i = 0, n = 0; i < num_regs; i++, n++) { + unsigned int reg = regs[i].reg; + struct regmap_range_node *range; + + range = _regmap_range_lookup(map, reg); + if (range) { + unsigned int win_page = _regmap_register_page(map, reg, + range); + + if (i == 0) + this_page = win_page; + if (win_page != this_page) { + this_page = win_page; + ret = _regmap_raw_multi_reg_write(map, base, n); + if (ret != 0) + return ret; + base += n; + n = 0; + } + ret = _regmap_select_page(map, &base[n].reg, range, 1); + if (ret != 0) + return ret; + } + } + if (n > 0) + return _regmap_raw_multi_reg_write(map, base, n); + return 0; +} + +static int _regmap_multi_reg_write(struct regmap *map, + const struct reg_default *regs, + size_t num_regs) +{ + int i; + int ret; + + if (!map->can_multi_write) { + for (i = 0; i < num_regs; i++) { + ret = _regmap_write(map, regs[i].reg, regs[i].def); + if (ret != 0) + return ret; + } + return 0; + } + + if (!map->format.parse_inplace) + return -EINVAL; + + if (map->writeable_reg) + for (i = 0; i < num_regs; i++) { + int reg = regs[i].reg; + if (!map->writeable_reg(map->dev, reg)) + return -EINVAL; + if (reg % map->reg_stride) + return -EINVAL; + } + + if (!map->cache_bypass) { + for (i = 0; i < num_regs; i++) { + unsigned int val = regs[i].def; + unsigned int reg = regs[i].reg; + ret = regcache_write(map, reg, val); + if (ret) { + dev_err(map->dev, + "Error in caching of register: %x ret: %d\n", + reg, ret); + return ret; + } + } + if (map->cache_only) { + map->cache_dirty = true; + return 0; + } + } + + WARN_ON(!map->bus); + + for (i = 0; i < num_regs; i++) { + unsigned int reg = regs[i].reg; + struct regmap_range_node *range; + range = _regmap_range_lookup(map, reg); + if (range) { + size_t len = sizeof(struct reg_default)*num_regs; + struct reg_default *base = kmemdup(regs, len, + GFP_KERNEL); + if (!base) + return -ENOMEM; + ret = _regmap_range_multi_paged_reg_write(map, base, + num_regs); + kfree(base); + + return ret; + } + } + return _regmap_raw_multi_reg_write(map, regs, num_regs); +} + +/* * regmap_multi_reg_write(): Write multiple registers to the device * + * where the set of register,value pairs are supplied in any order, + * possibly not all in a single range. 
+ * + * @map: Register map to write to + * @regs: Array of structures containing register,value to be written + * @num_regs: Number of registers to write + * + * The 'normal' block write mode will send ultimately send data on the + * target bus as R,V1,V2,V3,..,Vn where successively higer registers are + * addressed. However, this alternative block multi write mode will send + * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device + * must of course support the mode. + * + * A value of zero will be returned on success, a negative errno will be + * returned in error cases. + */ +int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, + int num_regs) +{ + int ret; + + map->lock(map->lock_arg); + + ret = _regmap_multi_reg_write(map, regs, num_regs); + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_multi_reg_write); + +/* + * regmap_multi_reg_write_bypassed(): Write multiple registers to the + * device but not the cache + * * where the set of register are supplied in any order * * @map: Register map to write to @@ -1592,30 +1816,27 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write); * A value of zero will be returned on success, a negative errno will * be returned in error cases. */ -int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs, - int num_regs) +int regmap_multi_reg_write_bypassed(struct regmap *map, + const struct reg_default *regs, + int num_regs) { - int ret = 0, i; - - for (i = 0; i < num_regs; i++) { - int reg = regs[i].reg; - if (reg % map->reg_stride) - return -EINVAL; - } + int ret; + bool bypass; map->lock(map->lock_arg); - for (i = 0; i < num_regs; i++) { - ret = _regmap_write(map, regs[i].reg, regs[i].def); - if (ret != 0) - goto out; - } -out: + bypass = map->cache_bypass; + map->cache_bypass = true; + + ret = _regmap_multi_reg_write(map, regs, num_regs); + + map->cache_bypass = bypass; + map->unlock(map->lock_arg); return ret; } -EXPORT_SYMBOL_GPL(regmap_multi_reg_write); +EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed); /** * regmap_raw_write_async(): Write raw values to one or more registers @@ -1736,6 +1957,9 @@ static int _regmap_read(struct regmap *map, unsigned int reg, if (map->cache_only) return -EBUSY; + if (!regmap_readable(map, reg)) + return -EIO; + ret = map->reg_read(context, reg, val); if (ret == 0) { #ifdef LOG_DEVICE @@ -1966,9 +2190,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, if (tmp != orig) { ret = _regmap_write(map, reg, tmp); - *change = true; + if (change) + *change = true; } else { - *change = false; + if (change) + *change = false; } return ret; @@ -1987,11 +2213,10 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, int regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val) { - bool change; int ret; map->lock(map->lock_arg); - ret = _regmap_update_bits(map, reg, mask, val, &change); + ret = _regmap_update_bits(map, reg, mask, val, NULL); map->unlock(map->lock_arg); return ret; @@ -2016,14 +2241,13 @@ EXPORT_SYMBOL_GPL(regmap_update_bits); int regmap_update_bits_async(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val) { - bool change; int ret; map->lock(map->lock_arg); map->async = true; - ret = _regmap_update_bits(map, reg, mask, val, &change); + ret = _regmap_update_bits(map, reg, mask, val, NULL); map->async = false; @@ -2173,35 +2397,21 @@ EXPORT_SYMBOL_GPL(regmap_async_complete); * apply them immediately. 
Typically this is used to apply * corrections to be applied to the device defaults on startup, such * as the updates some vendors provide to undocumented registers. + * + * The caller must ensure that this function cannot be called + * concurrently with either itself or regcache_sync(). */ int regmap_register_patch(struct regmap *map, const struct reg_default *regs, int num_regs) { struct reg_default *p; - int i, ret; + int ret; bool bypass; if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n", num_regs)) return 0; - map->lock(map->lock_arg); - - bypass = map->cache_bypass; - - map->cache_bypass = true; - map->async = true; - - /* Write out first; it's useful to apply even if we fail later. */ - for (i = 0; i < num_regs; i++) { - ret = _regmap_write(map, regs[i].reg, regs[i].def); - if (ret != 0) { - dev_err(map->dev, "Failed to write %x = %x: %d\n", - regs[i].reg, regs[i].def, ret); - goto out; - } - } - p = krealloc(map->patch, sizeof(struct reg_default) * (map->patch_regs + num_regs), GFP_KERNEL); @@ -2210,9 +2420,20 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs, map->patch = p; map->patch_regs += num_regs; } else { - ret = -ENOMEM; + return -ENOMEM; } + map->lock(map->lock_arg); + + bypass = map->cache_bypass; + + map->cache_bypass = true; + map->async = true; + + ret = _regmap_multi_reg_write(map, regs, num_regs); + if (ret != 0) + goto out; + out: map->async = false; map->cache_bypass = bypass; @@ -2240,6 +2461,18 @@ int regmap_get_val_bytes(struct regmap *map) } EXPORT_SYMBOL_GPL(regmap_get_val_bytes); +int regmap_parse_val(struct regmap *map, const void *buf, + unsigned int *val) +{ + if (!map->format.parse_val) + return -EINVAL; + + *val = map->format.parse_val(buf); + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_parse_val); + static int __init regmap_initcall(void) { regmap_debugfs_initcall(); |