Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/memory.c		1
-rw-r--r--	drivers/base/power/domain.c	402
-rw-r--r--	drivers/base/power/opp.c	17
-rw-r--r--	drivers/base/power/trace.c	2
-rw-r--r--	drivers/base/syscore.c		8
5 files changed, 347 insertions, 83 deletions
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9f9b235..45d7c8f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -30,7 +30,6 @@
static DEFINE_MUTEX(mem_sysfs_mutex);
#define MEMORY_CLASS_NAME "memory"
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
static int sections_per_block;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1aed94c..be8714a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -13,6 +13,11 @@
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
#ifdef CONFIG_PM
@@ -30,6 +35,41 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
genpd->sd_count--;
}
+static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+{
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&genpd->lock);
+ /*
+ * Wait for the domain to transition into either the active
+ * or the power off state.
+ */
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (genpd->status == GPD_STATE_ACTIVE
+ || genpd->status == GPD_STATE_POWER_OFF)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
+}
+
+static void genpd_release_lock(struct generic_pm_domain *genpd)
+{
+ mutex_unlock(&genpd->lock);
+}
+
+static void genpd_set_active(struct generic_pm_domain *genpd)
+{
+ if (genpd->resume_count == 0)
+ genpd->status = GPD_STATE_ACTIVE;
+}
+
/**
* pm_genpd_poweron - Restore power to a given PM domain and its parents.
* @genpd: PM domain to power up.
@@ -37,24 +77,34 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
* Restore power to @genpd and all of its parents so that it is possible to
* resume a device belonging to it.
*/
-static int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
+ struct generic_pm_domain *parent = genpd->parent;
+ DEFINE_WAIT(wait);
int ret = 0;
start:
- if (genpd->parent)
- mutex_lock(&genpd->parent->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ if (parent) {
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ mutex_lock(&genpd->lock);
+ }
- if (!genpd->power_is_off
+ if (genpd->status == GPD_STATE_ACTIVE
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
goto out;
- if (genpd->parent && genpd->parent->power_is_off) {
+ if (genpd->status != GPD_STATE_POWER_OFF) {
+ genpd_set_active(genpd);
+ goto out;
+ }
+
+ if (parent && parent->status != GPD_STATE_ACTIVE) {
mutex_unlock(&genpd->lock);
- mutex_unlock(&genpd->parent->lock);
+ genpd_release_lock(parent);
- ret = pm_genpd_poweron(genpd->parent);
+ ret = pm_genpd_poweron(parent);
if (ret)
return ret;
@@ -67,14 +117,14 @@ static int pm_genpd_poweron(struct generic_pm_domain *genpd)
goto out;
}
- genpd->power_is_off = false;
- if (genpd->parent)
- genpd->parent->sd_count++;
+ genpd_set_active(genpd);
+ if (parent)
+ parent->sd_count++;
out:
mutex_unlock(&genpd->lock);
- if (genpd->parent)
- mutex_unlock(&genpd->parent->lock);
+ if (parent)
+ genpd_release_lock(parent);
return ret;
}
@@ -90,6 +140,7 @@ static int pm_genpd_poweron(struct generic_pm_domain *genpd)
*/
static int __pm_genpd_save_device(struct dev_list_entry *dle,
struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
{
struct device *dev = dle->dev;
struct device_driver *drv = dev->driver;
@@ -98,6 +149,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
if (dle->need_restore)
return 0;
+ mutex_unlock(&genpd->lock);
+
if (drv && drv->pm && drv->pm->runtime_suspend) {
if (genpd->start_device)
genpd->start_device(dev);
@@ -108,6 +161,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
genpd->stop_device(dev);
}
+ mutex_lock(&genpd->lock);
+
if (!ret)
dle->need_restore = true;
@@ -121,6 +176,7 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
*/
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
{
struct device *dev = dle->dev;
struct device_driver *drv = dev->driver;
@@ -128,6 +184,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
if (!dle->need_restore)
return;
+ mutex_unlock(&genpd->lock);
+
if (drv && drv->pm && drv->pm->runtime_resume) {
if (genpd->start_device)
genpd->start_device(dev);
@@ -138,10 +196,39 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
genpd->stop_device(dev);
}
+ mutex_lock(&genpd->lock);
+
dle->need_restore = false;
}
/**
+ * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
+ * @genpd: PM domain to check.
+ *
+ * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
+ * a "power off" operation, which means that a "power on" has occured in the
+ * meantime, or if its resume_count field is different from zero, which means
+ * that one of its devices has been resumed in the meantime.
+ */
+static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+{
+ return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ if (!work_pending(&genpd->power_off_work))
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
* pm_genpd_poweroff - Remove power from a given PM domain.
* @genpd: PM domain to power down.
*
@@ -150,13 +237,22 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
* the @genpd's devices' drivers and remove power from @genpd.
*/
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
{
struct generic_pm_domain *parent;
struct dev_list_entry *dle;
unsigned int not_suspended;
- int ret;
+ int ret = 0;
- if (genpd->power_is_off || genpd->prepared_count > 0)
+ start:
+ /*
+ * Do not try to power off the domain in the following situations:
+ * (1) The domain is already in the "power off" state.
+ * (2) System suspend is in progress.
+ * (3) One of the domain's devices is being resumed right now.
+ */
+ if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
+ || genpd->resume_count > 0)
return 0;
if (genpd->sd_count > 0)
@@ -170,35 +266,76 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
if (not_suspended > genpd->in_progress)
return -EBUSY;
+ if (genpd->poweroff_task) {
+ /*
+ * Another instance of pm_genpd_poweroff() is executing
+ * callbacks, so tell it to start over and return.
+ */
+ genpd->status = GPD_STATE_REPEAT;
+ return 0;
+ }
+
if (genpd->gov && genpd->gov->power_down_ok) {
if (!genpd->gov->power_down_ok(&genpd->domain))
return -EAGAIN;
}
+ genpd->status = GPD_STATE_BUSY;
+ genpd->poweroff_task = current;
+
list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
ret = __pm_genpd_save_device(dle, genpd);
- if (ret)
- goto err_dev;
- }
+ if (ret) {
+ genpd_set_active(genpd);
+ goto out;
+ }
- if (genpd->power_off)
- genpd->power_off(genpd);
+ if (genpd_abort_poweroff(genpd))
+ goto out;
- genpd->power_is_off = true;
+ if (genpd->status == GPD_STATE_REPEAT) {
+ genpd->poweroff_task = NULL;
+ goto start;
+ }
+ }
parent = genpd->parent;
if (parent) {
- genpd_sd_counter_dec(parent);
- if (parent->sd_count == 0)
- queue_work(pm_wq, &parent->power_off_work);
+ mutex_unlock(&genpd->lock);
+
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+
+ if (genpd_abort_poweroff(genpd)) {
+ genpd_release_lock(parent);
+ goto out;
+ }
}
- return 0;
+ if (genpd->power_off) {
+ ret = genpd->power_off(genpd);
+ if (ret == -EBUSY) {
+ genpd_set_active(genpd);
+ if (parent)
+ genpd_release_lock(parent);
+
+ goto out;
+ }
+ }
+
+ genpd->status = GPD_STATE_POWER_OFF;
+
+ if (parent) {
+ genpd_sd_counter_dec(parent);
+ if (parent->sd_count == 0)
+ genpd_queue_power_off_work(parent);
- err_dev:
- list_for_each_entry_continue(dle, &genpd->dev_list, node)
- __pm_genpd_restore_device(dle, genpd);
+ genpd_release_lock(parent);
+ }
+ out:
+ genpd->poweroff_task = NULL;
+ wake_up_all(&genpd->status_wait_queue);
return ret;
}
@@ -212,13 +349,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
- if (genpd->parent)
- mutex_lock(&genpd->parent->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ genpd_acquire_lock(genpd);
pm_genpd_poweroff(genpd);
- mutex_unlock(&genpd->lock);
- if (genpd->parent)
- mutex_unlock(&genpd->parent->lock);
+ genpd_release_lock(genpd);
}
/**
@@ -239,23 +372,17 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->parent)
- mutex_lock(&genpd->parent->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
if (genpd->stop_device) {
int ret = genpd->stop_device(dev);
if (ret)
- goto out;
+ return ret;
}
+
+ mutex_lock(&genpd->lock);
genpd->in_progress++;
pm_genpd_poweroff(genpd);
genpd->in_progress--;
-
- out:
mutex_unlock(&genpd->lock);
- if (genpd->parent)
- mutex_unlock(&genpd->parent->lock);
return 0;
}
@@ -276,9 +403,6 @@ static void __pm_genpd_runtime_resume(struct device *dev,
break;
}
}
-
- if (genpd->start_device)
- genpd->start_device(dev);
}
/**
@@ -292,6 +416,7 @@ static void __pm_genpd_runtime_resume(struct device *dev,
static int pm_genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
+ DEFINE_WAIT(wait);
int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -305,9 +430,34 @@ static int pm_genpd_runtime_resume(struct device *dev)
return ret;
mutex_lock(&genpd->lock);
+ genpd->status = GPD_STATE_BUSY;
+ genpd->resume_count++;
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ /*
+ * If current is the powering off task, we have been called
+ * reentrantly from one of the device callbacks, so we should
+ * not wait.
+ */
+ if (!genpd->poweroff_task || genpd->poweroff_task == current)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
__pm_genpd_runtime_resume(dev, genpd);
+ genpd->resume_count--;
+ genpd_set_active(genpd);
+ wake_up_all(&genpd->status_wait_queue);
mutex_unlock(&genpd->lock);
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
return 0;
}
@@ -339,7 +489,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
struct generic_pm_domain *parent = genpd->parent;
- if (genpd->power_is_off)
+ if (genpd->status == GPD_STATE_POWER_OFF)
return;
if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
@@ -348,7 +498,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
if (genpd->power_off)
genpd->power_off(genpd);
- genpd->power_is_off = true;
+ genpd->status = GPD_STATE_POWER_OFF;
if (parent) {
genpd_sd_counter_dec(parent);
pm_genpd_sync_poweroff(parent);
@@ -356,6 +506,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
}
/**
+ * resume_needed - Check whether to resume a device before system suspend.
+ * @dev: Device to check.
+ * @genpd: PM domain the device belongs to.
+ *
+ * There are two cases in which a device that can wake up the system from sleep
+ * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * to wake up the system and it has to remain active for this purpose while the
+ * system is in the sleep state and (2) if the device is not enabled to wake up
+ * the system from sleep states and it generally doesn't generate wakeup signals
+ * by itself (those signals are generated on its behalf by other parts of the
+ * system). In the latter case it may be necessary to reconfigure the device's
+ * wakeup settings during system suspend, because it may have been set up to
+ * signal remote wakeup from the system's working state as needed by runtime PM.
+ * Return 'true' in either of the above cases.
+ */
+static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
+{
+ bool active_wakeup;
+
+ if (!device_can_wakeup(dev))
+ return false;
+
+ active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+}
+
+/**
* pm_genpd_prepare - Start power transition of a device in a PM domain.
* @dev: Device to start the transition of.
*
@@ -367,6 +544,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
static int pm_genpd_prepare(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -374,33 +552,57 @@ static int pm_genpd_prepare(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ /*
+ * If a wakeup request is pending for the device, it should be woken up
+ * at this point and a system wakeup event should be reported if it's
+ * set up to wake up the system from sleep states.
+ */
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+
+ if (resume_needed(dev, genpd))
+ pm_runtime_resume(dev);
+
+ genpd_acquire_lock(genpd);
if (genpd->prepared_count++ == 0)
- genpd->suspend_power_off = genpd->power_is_off;
+ genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+
+ genpd_release_lock(genpd);
if (genpd->suspend_power_off) {
- mutex_unlock(&genpd->lock);
+ pm_runtime_put_noidle(dev);
return 0;
}
/*
- * If the device is in the (runtime) "suspended" state, call
- * .start_device() for it, if defined.
- */
- if (pm_runtime_suspended(dev))
- __pm_genpd_runtime_resume(dev, genpd);
-
- /*
- * Do not check if runtime resume is pending at this point, because it
- * has been taken care of already and if pm_genpd_poweron() ran at this
- * point as a result of the check, it would deadlock.
+ * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
+ * so pm_genpd_poweron() will return immediately, but if the device
+ * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * to make it operational.
*/
+ pm_runtime_resume(dev);
__pm_runtime_disable(dev, false);
- mutex_unlock(&genpd->lock);
+ ret = pm_generic_prepare(dev);
+ if (ret) {
+ mutex_lock(&genpd->lock);
+
+ if (--genpd->prepared_count == 0)
+ genpd->suspend_power_off = false;
- return pm_generic_prepare(dev);
+ mutex_unlock(&genpd->lock);
+ pm_runtime_enable(dev);
+ }
+
+ pm_runtime_put_sync(dev);
+ return ret;
}
/**
@@ -716,7 +918,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*/
- genpd->power_is_off = true;
+ genpd->status = GPD_STATE_POWER_OFF;
if (genpd->suspend_power_off) {
/*
* The boot kernel might put the domain into the power on state,
@@ -786,7 +988,9 @@ static void pm_genpd_complete(struct device *dev)
if (run_complete) {
pm_generic_complete(dev);
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
}
}
@@ -824,9 +1028,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ genpd_acquire_lock(genpd);
- if (genpd->power_is_off) {
+ if (genpd->status == GPD_STATE_POWER_OFF) {
ret = -EINVAL;
goto out;
}
@@ -858,7 +1062,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
spin_unlock_irq(&dev->power.lock);
out:
- mutex_unlock(&genpd->lock);
+ genpd_release_lock(genpd);
return ret;
}
@@ -879,7 +1083,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ genpd_acquire_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -903,7 +1107,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
}
out:
- mutex_unlock(&genpd->lock);
+ genpd_release_lock(genpd);
return ret;
}
@@ -922,9 +1126,19 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ start:
+ genpd_acquire_lock(genpd);
+ mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ if (new_subdomain->status != GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
- if (genpd->power_is_off && !new_subdomain->power_is_off) {
+ if (genpd->status == GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_POWER_OFF) {
ret = -EINVAL;
goto out;
}
@@ -936,17 +1150,14 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
}
}
- mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
-
list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
new_subdomain->parent = genpd;
- if (!subdomain->power_is_off)
+ if (subdomain->status != GPD_STATE_POWER_OFF)
genpd->sd_count++;
- mutex_unlock(&new_subdomain->lock);
-
out:
- mutex_unlock(&genpd->lock);
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
return ret;
}
@@ -965,7 +1176,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ start:
+ genpd_acquire_lock(genpd);
list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
if (subdomain != target)
@@ -973,9 +1185,16 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+ if (subdomain->status != GPD_STATE_POWER_OFF
+ && subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
+
list_del(&subdomain->sd_node);
subdomain->parent = NULL;
- if (!subdomain->power_is_off)
+ if (subdomain->status != GPD_STATE_POWER_OFF)
genpd_sd_counter_dec(genpd);
mutex_unlock(&subdomain->lock);
@@ -984,7 +1203,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
break;
}
- mutex_unlock(&genpd->lock);
+ genpd_release_lock(genpd);
return ret;
}
@@ -1010,7 +1229,10 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
genpd->in_progress = 0;
genpd->sd_count = 0;
- genpd->power_is_off = is_off;
+ genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+ init_waitqueue_head(&genpd->status_wait_queue);
+ genpd->poweroff_task = NULL;
+ genpd->resume_count = 0;
genpd->device_count = 0;
genpd->suspended_count = 0;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
@@ -1030,4 +1252,22 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
genpd->domain.ops.restore = pm_genpd_restore;
genpd->domain.ops.complete = pm_genpd_complete;
+ mutex_lock(&gpd_list_lock);
+ list_add(&genpd->gpd_list_node, &gpd_list);
+ mutex_unlock(&gpd_list_lock);
+}
+
+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
}
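For reference, a minimal sketch (not part of the patch itself) of how a platform might use the genpd interfaces touched above — pm_genpd_init(), pm_genpd_add_device() and pm_genpd_poweroff_unused(). The foo_* names and callbacks are hypothetical, and passing NULL for the governor argument of pm_genpd_init() is an assumption:

/* Hypothetical platform code; not part of this commit. */
#include <linux/pm_domain.h>

static int foo_domain_power_off(struct generic_pm_domain *genpd)
{
	/* Platform-specific: cut power to the domain here. */
	return 0;
}

static int foo_domain_power_on(struct generic_pm_domain *genpd)
{
	/* Platform-specific: restore power to the domain here. */
	return 0;
}

static struct generic_pm_domain foo_domain = {
	.power_off = foo_domain_power_off,
	.power_on = foo_domain_power_on,
};

static int foo_setup_pm_domain(struct device *dev)
{
	int ret;

	/* Register the domain as initially powered on, with no governor. */
	pm_genpd_init(&foo_domain, NULL, false);

	/* Attach the device so runtime PM can power the domain up and down. */
	ret = pm_genpd_add_device(&foo_domain, dev);
	if (ret)
		return ret;

	/* Queue power-off work for registered domains with no devices in use. */
	pm_genpd_poweroff_unused();
	return 0;
}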
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899..5cc1232 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table
+ */
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
#endif /* CONFIG_CPU_FREQ */
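A brief usage sketch (not part of the patch) pairing the existing opp_init_cpufreq_table() with the opp_free_cpufreq_table() helper added above; the foo_* names are hypothetical:

/* Hypothetical cpufreq driver code; not part of this commit. */
static struct cpufreq_frequency_table *foo_freq_table;

static int foo_cpufreq_init_table(struct device *cpu_dev)
{
	/* Build a cpufreq frequency table from the OPPs registered for cpu_dev. */
	return opp_init_cpufreq_table(cpu_dev, &foo_freq_table);
}

static void foo_cpufreq_free_table(struct device *cpu_dev)
{
	/* Free the table; opp_free_cpufreq_table() also resets the pointer to NULL. */
	opp_free_cpufreq_table(cpu_dev, &foo_freq_table);
}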
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index c80e138..af10abe 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
unsigned int val;
get_rtc_time(&time);
- pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
+ pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
val = time.tm_year; /* 100 years */
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index c126db3..e8d11b6 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,6 +9,7 @@
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
static LIST_HEAD(syscore_ops_list);
static DEFINE_MUTEX(syscore_ops_lock);
@@ -48,6 +49,13 @@ int syscore_suspend(void)
struct syscore_ops *ops;
int ret = 0;
+ pr_debug("Checking wakeup interrupts\n");
+
+ /* Return error code if there are any wakeup interrupts pending. */
+ ret = check_wakeup_irqs();
+ if (ret)
+ return ret;
+
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");