From c125e96f044427f38d106fab7bc5e4a5e6a18262 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Mon, 5 Jul 2010 22:43:53 +0200
Subject: PM: Make it possible to avoid races between wakeup and system sleep

One of the arguments during the suspend blockers discussion was that the
mainline kernel didn't contain any mechanisms making it possible to avoid
races between wakeup and system suspend.

Generally, there are two problems in that area.  First, if a wakeup event
occurs exactly when /sys/power/state is being written to, it may be
delivered to user space right before the freezer kicks in, so the user
space consumer of the event may not be able to process it before the
system is suspended.  Second, if a wakeup event occurs after user space
has been frozen, it is not generally guaranteed that the ongoing
transition of the system into a sleep state will be aborted.

To address these issues introduce a new global sysfs attribute,
/sys/power/wakeup_count, associated with a running counter of wakeup
events and three helper functions, pm_stay_awake(), pm_relax(), and
pm_wakeup_event(), that may be used by kernel subsystems to control the
behavior of this attribute and to request the PM core to abort system
transitions into a sleep state already in progress.

The /sys/power/wakeup_count file may be read from or written to by user
space.  Reads will always succeed (unless interrupted by a signal) and
return the current value of the wakeup events counter.  Writes, however,
will only succeed if the written number is equal to the current value of
the wakeup events counter.  If a write is successful, it will cause the
kernel to save the current value of the wakeup events counter and to
abort the subsequent system transition into a sleep state if any wakeup
events are reported after the write has returned.

[The assumption is that before writing to /sys/power/state user space
will first read from /sys/power/wakeup_count.  Next, user space consumers
of wakeup events will have a chance to acknowledge or veto the upcoming
system transition to a sleep state.  Finally, if the transition is
allowed to proceed, /sys/power/wakeup_count will be written to and if
that succeeds, /sys/power/state will be written to as well.  Still, if
any wakeup events are reported to the PM core by kernel subsystems after
that point, the transition will be aborted.]

Additionally, put a wakeup events counter into struct dev_pm_info and
make these per-device wakeup event counters available via sysfs, so that
it's possible to check the activity of various wakeup event sources
within the kernel.

To illustrate how subsystems can use pm_wakeup_event(), make the
low-level PCI runtime PM wakeup-handling code use it.
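As an illustrative sketch only (not part of this patch), the user-space
sequence described in the bracketed paragraph above could look roughly
like this; the acknowledge/veto step is application-specific and omitted,
writing "mem" assumes suspend-to-RAM is the target state, and error
handling is reduced to giving up:

#include <stdio.h>

static int try_to_suspend(void)
{
	unsigned long count;
	FILE *f;

	/* 1. Read the current number of registered wakeup events. */
	f = fopen("/sys/power/wakeup_count", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lu", &count) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	/* 2. (Consumers of wakeup events acknowledge or veto the suspend here.) */

	/* 3. Write the same value back; the write fails if new wakeup events
	 *    have been registered in the meantime.
	 */
	f = fopen("/sys/power/wakeup_count", "w");
	if (!f)
		return -1;
	fprintf(f, "%lu", count);
	if (fclose(f) != 0)
		return -1;	/* events arrived: do not suspend, retry later */

	/* 4. Start the transition; it will still be aborted if wakeup events
	 *    are reported after this point.
	 */
	f = fopen("/sys/power/state", "w");
	if (!f)
		return -1;
	fprintf(f, "mem");
	return fclose(f);
}

int main(void)
{
	return try_to_suspend() ? 1 : 0;
}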
Signed-off-by: Rafael J. Wysocki
Acked-by: Jesse Barnes
Acked-by: Greg Kroah-Hartman
Acked-by: markgross
Reviewed-by: Alan Stern
---
 drivers/base/power/Makefile |   2 +-
 drivers/base/power/main.c   |   1 +
 drivers/base/power/sysfs.c  |  15 +++
 drivers/base/power/wakeup.c | 229 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 246 insertions(+), 1 deletion(-)
 create mode 100644 drivers/base/power/wakeup.c
(limited to 'drivers/base')

diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 89de753..cbccf9a 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_PM)	+= sysfs.o
-obj-$(CONFIG_PM_SLEEP)	+= main.o
+obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_OPS)	+= generic_ops.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 941fcb8..5419a49 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
 {
 	dev->power.status = DPM_ON;
 	init_completion(&dev->power.completion);
+	dev->power.wakeup_count = 0;
 	pm_runtime_init(dev);
 }
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a4c33bc..81d344e 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -73,6 +73,8 @@
  *	device are known to the PM core.  However, for some devices this
  *	attribute is set to "enabled" by bus type code or device drivers and in
  *	that cases it should be safe to leave the default value.
+ *
+ *	wakeup_count - Report the number of wakeup events related to the device
  */

 static const char enabled[] = "enabled";
@@ -144,6 +146,16 @@ wake_store(struct device * dev, struct device_attribute *attr,

 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);

+#ifdef CONFIG_PM_SLEEP
+static ssize_t wakeup_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", dev->power.wakeup_count);
+}
+
+static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+#endif
+
 #ifdef CONFIG_PM_ADVANCED_DEBUG
 #ifdef CONFIG_PM_RUNTIME

@@ -230,6 +242,9 @@ static struct attribute * power_attrs[] = {
 	&dev_attr_control.attr,
 #endif
 	&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_SLEEP
+	&dev_attr_wakeup_count.attr,
+#endif
 #ifdef CONFIG_PM_ADVANCED_DEBUG
 	&dev_attr_async.attr,
 #ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
new file mode 100644
index 0000000..2559907
--- /dev/null
+++ b/drivers/base/power/wakeup.c
@@ -0,0 +1,229 @@
+/*
+ * drivers/base/power/wakeup.c - System wakeup events framework
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki , Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * If set, the suspend/hibernate code will abort transitions to a sleep state
+ * if wakeup events are registered during or immediately before the transition.
+ */
+bool events_check_enabled;
+
+/* The counter of registered wakeup events. */
+static unsigned long event_count;
+/* A preserved old value of event_count. */
+static unsigned long saved_event_count;
+/* The counter of wakeup events being processed. */
+static unsigned long events_in_progress;
+
+static DEFINE_SPINLOCK(events_lock);
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended.  The moment this period
+ * will end depends on how the wakeup event is going to be processed after being
+ * detected and all of the possible cases can be divided into two distinct
+ * groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user space
+ * for further processing.  In that case the functional unit that has detected
+ * the event may later "close" the "no suspend" period associated with it
+ * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
+ * pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one.  In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing.  This
+ * knowledge, however, may not be available to it, so it can simply specify time
+ * to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ */
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the
+ * counter of wakeup events being processed.  If @dev is not NULL, the counter
+ * of wakeup events related to @dev is incremented too.
+ *
+ * Call this function after detecting of a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_stay_awake(struct device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (dev)
+		dev->power.wakeup_count++;
+
+	events_in_progress++;
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ *
+ * Notify the PM core that a wakeup event has been processed by decrementing
+ * the counter of wakeup events being processed and incrementing the counter
+ * of registered wakeup events.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void pm_relax(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_in_progress) {
+		events_in_progress--;
+		event_count++;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_wakeup_work_fn - Deferred closing of a wakeup event.
+ *
+ * Execute pm_relax() for a wakeup event detected in the past and free the
+ * work item object used for queuing up the work.
+ */
+static void pm_wakeup_work_fn(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+
+	pm_relax();
+	kfree(dwork);
+}
+
+/**
+ * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) that will take
+ * approximately @msec milliseconds to be processed by the kernel.  Increment
+ * the counter of wakeup events being processed and queue up a work item
+ * that will execute pm_relax() for the event after @msec milliseconds.  If @dev
+ * is not NULL, the counter of wakeup events related to @dev is incremented too.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+	unsigned long flags;
+	struct delayed_work *dwork;
+
+	dwork = msec ? kzalloc(sizeof(*dwork), GFP_ATOMIC) : NULL;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (dev)
+		dev->power.wakeup_count++;
+
+	if (dwork) {
+		INIT_DELAYED_WORK(dwork, pm_wakeup_work_fn);
+		schedule_delayed_work(dwork, msecs_to_jiffies(msec));
+
+		events_in_progress++;
+	} else {
+		event_count++;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_check_wakeup_events - Check for new wakeup events.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past to check if new wakeup events have been registered since
+ * the old value was stored.  Check if the current number of wakeup events being
+ * processed is zero.
+ */
+bool pm_check_wakeup_events(void)
+{
+	unsigned long flags;
+	bool ret = true;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_check_enabled) {
+		ret = (event_count == saved_event_count) && !events_in_progress;
+		events_check_enabled = ret;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+	return ret;
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ *
+ * Store the number of registered wakeup events at the address in @count.  Block
+ * if the current number of wakeup events being processed is nonzero.
+ *
+ * Return false if the wait for the number of wakeup events being processed to
+ * drop down to zero has been interrupted by a signal (and the current number
+ * of wakeup events being processed is still nonzero).  Otherwise return true.
+ */
+bool pm_get_wakeup_count(unsigned long *count)
+{
+	bool ret;
+
+	spin_lock_irq(&events_lock);
+	if (capable(CAP_SYS_ADMIN))
+		events_check_enabled = false;
+
+	while (events_in_progress && !signal_pending(current)) {
+		spin_unlock_irq(&events_lock);
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		spin_lock_irq(&events_lock);
+	}
+	*count = event_count;
+	ret = !events_in_progress;
+	spin_unlock_irq(&events_lock);
+	return ret;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events to be used by pm_check_wakeup_events()
+ * and return true.  Otherwise return false.
+ */
+bool pm_save_wakeup_count(unsigned long count)
+{
+	bool ret = false;
+
+	spin_lock_irq(&events_lock);
+	if (count == event_count && !events_in_progress) {
+		saved_event_count = count;
+		events_check_enabled = true;
+		ret = true;
+	}
+	spin_unlock_irq(&events_lock);
+	return ret;
+}
--
cgit v1.1
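For illustration, the two usage patterns spelled out in the wakeup.c
header comment above might appear in a hypothetical driver as follows;
the handler names, the hand-off step and the 100 ms estimate are made up
for the example, and the fragments assume the usual interrupt handling
declarations:

/* Pattern 1: the code that detects the wakeup event also finishes handling
 * it, so it can open and close the "no suspend" period itself.
 */
static irqreturn_t foo_wakeup_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_stay_awake(dev);		/* open the "no suspend" period */
	/* ... fully process the wakeup event here ... */
	pm_relax();			/* close it, the event has been dealt with */

	return IRQ_HANDLED;
}

/* Pattern 2: the event is handed off (e.g. to user space), so the detecting
 * code can only give an estimate of how long its processing will take.
 */
static irqreturn_t bar_wakeup_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_wakeup_event(dev, 100);	/* keep the system awake for ~100 ms */
	/* ... queue the event for its consumer ... */

	return IRQ_HANDLED;
}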
From 4eb241e5691363c391aac8a5051d0d013188ec84 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Wed, 7 Jul 2010 23:43:51 +0200
Subject: PM: Do not use dynamically allocated objects in pm_wakeup_event()

Originally, pm_wakeup_event() used struct delayed_work objects, allocated
with GFP_ATOMIC, to schedule the execution of pm_relax() in the future.
However, as noted by Alan Stern, it is not necessary to do that, because
all pm_wakeup_event() calls can use one static timer that will always be
set to expire at the latest time passed to pm_wakeup_event().

The modifications are based on the example code posted by Alan.

Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/wakeup.c | 56 ++++++++++++++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 19 deletions(-)
(limited to 'drivers/base')

diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 2559907..eb594fa 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -28,6 +28,11 @@ static unsigned long events_in_progress;

 static DEFINE_SPINLOCK(events_lock);

+static void pm_wakeup_timer_fn(unsigned long data);
+
+static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0);
+static unsigned long events_timer_expires;
+
 /*
  * The functions below use the observation that each wakeup event starts a
  * period in which the system should not be suspended.  The moment this period
@@ -103,17 +108,22 @@ void pm_relax(void)
 }

 /**
- * pm_wakeup_work_fn - Deferred closing of a wakeup event.
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
  *
- * Execute pm_relax() for a wakeup event detected in the past and free the
- * work item object used for queuing up the work.
+ * Decrease the counter of wakeup events being processed after it was increased
+ * by pm_wakeup_event().
  */
-static void pm_wakeup_work_fn(struct work_struct *work)
+static void pm_wakeup_timer_fn(unsigned long data)
 {
-	struct delayed_work *dwork = to_delayed_work(work);
+	unsigned long flags;

-	pm_relax();
-	kfree(dwork);
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_timer_expires
+	    && time_before_eq(events_timer_expires, jiffies)) {
+		events_in_progress--;
+		events_timer_expires = 0;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
 }

 /**
@@ -123,30 +133,38 @@ static void pm_wakeup_work_fn(struct work_struct *work)
  *
  * Notify the PM core of a wakeup event (signaled by @dev) that will take
  * approximately @msec milliseconds to be processed by the kernel.  Increment
- * the counter of wakeup events being processed and queue up a work item
- * that will execute pm_relax() for the event after @msec milliseconds.  If @dev
- * is not NULL, the counter of wakeup events related to @dev is incremented too.
+ * the counter of registered wakeup events and (if @msec is nonzero) set up
+ * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
+ * timer has not been set up already, increment the counter of wakeup events
+ * being processed).  If @dev is not NULL, the counter of wakeup events related
+ * to @dev is incremented too.
  *
  * It is safe to call this function from interrupt context.
  */
 void pm_wakeup_event(struct device *dev, unsigned int msec)
 {
 	unsigned long flags;
-	struct delayed_work *dwork;
-
-	dwork = msec ? kzalloc(sizeof(*dwork), GFP_ATOMIC) : NULL;

 	spin_lock_irqsave(&events_lock, flags);
+	event_count++;
 	if (dev)
 		dev->power.wakeup_count++;

-	if (dwork) {
-		INIT_DELAYED_WORK(dwork, pm_wakeup_work_fn);
-		schedule_delayed_work(dwork, msecs_to_jiffies(msec));
+	if (msec) {
+		unsigned long expires;

-		events_in_progress++;
-	} else {
-		event_count++;
+		expires = jiffies + msecs_to_jiffies(msec);
+		if (!expires)
+			expires = 1;
+
+		if (!events_timer_expires
+		    || time_after(expires, events_timer_expires)) {
+			if (!events_timer_expires)
+				events_in_progress++;
+
+			mod_timer(&events_timer, expires);
+			events_timer_expires = expires;
+		}
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
 }
--
cgit v1.1
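The idea behind this change, a single shared timer that is only ever
pushed further into the future, can be sketched in isolation like this
(a plain user-space analogue, not kernel code; the kernel version
additionally uses time_after() so the comparison stays correct across
jiffies wraparound):

#include <stdio.h>

static unsigned long events_timer_expires;	/* 0 means "not armed" */

/* Re-arm the shared timer only if the new deadline is later than the one
 * it is already programmed for; the first caller also opens the event.
 */
static void wakeup_event_sketch(unsigned long expires)
{
	if (!expires)
		expires = 1;	/* 0 is reserved for "not armed" */

	if (!events_timer_expires || expires > events_timer_expires) {
		if (!events_timer_expires)
			printf("first event: mark one event as in progress\n");
		printf("program the timer to fire at %lu\n", expires);
		events_timer_expires = expires;
	}
}

int main(void)
{
	wakeup_event_sketch(100);	/* arms the timer */
	wakeup_event_sketch(50);	/* ignored, earlier than the current deadline */
	wakeup_event_sketch(200);	/* pushes the single deadline further out */
	return 0;
}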
From 0fcb4eef8294492c8f1de8236b1ed81f09e42922 Mon Sep 17 00:00:00 2001
From: Alan Stern
Date: Thu, 8 Jul 2010 00:05:37 +0200
Subject: PM / Runtime: Make runtime_status attribute not debug-only (v. 2)

This patch (as1404b) makes the runtime_status sysfs attribute available
even in the absence of CONFIG_PM_ADVANCED_DEBUG, and it changes the
routine to display "unsupported" when runtime PM is disabled for a
device.  Although not strictly 100% accurate, this will almost always be
correct.

Signed-off-by: Alan Stern
Acked-by: Dominik Brodowski
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/sysfs.c | 53 +++++++++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 20 deletions(-)
(limited to 'drivers/base')

diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 81d344e..1eca50c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -110,6 +110,38 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
 }

 static DEVICE_ATTR(control, 0644, control_show, control_store);
+
+static ssize_t rtpm_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	const char *p;
+
+	if (dev->power.runtime_error) {
+		p = "error\n";
+	} else if (dev->power.disable_depth) {
+		p = "unsupported\n";
+	} else {
+		switch (dev->power.runtime_status) {
+		case RPM_SUSPENDED:
+			p = "suspended\n";
+			break;
+		case RPM_SUSPENDING:
+			p = "suspending\n";
+			break;
+		case RPM_RESUMING:
+			p = "resuming\n";
+			break;
+		case RPM_ACTIVE:
+			p = "active\n";
+			break;
+		default:
+			return -EIO;
+		}
+	}
+	return sprintf(buf, p);
+}
+
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
 #endif

 static ssize_t
@@ -184,27 +216,8 @@ static ssize_t rtpm_enabled_show(struct device *dev,
 	return sprintf(buf, "enabled\n");
 }

-static ssize_t rtpm_status_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	if (dev->power.runtime_error)
-		return sprintf(buf, "error\n");
-	switch (dev->power.runtime_status) {
-	case RPM_SUSPENDED:
-		return sprintf(buf, "suspended\n");
-	case RPM_SUSPENDING:
-		return sprintf(buf, "suspending\n");
-	case RPM_RESUMING:
-		return sprintf(buf, "resuming\n");
-	case RPM_ACTIVE:
-		return sprintf(buf, "active\n");
-	}
-	return -EIO;
-}
-
 static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
 static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
 static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
 #endif

@@ -240,6 +253,7 @@ static DEVICE_ATTR(async, 0644, async_show, async_store);
 static struct attribute * power_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_control.attr,
+	&dev_attr_runtime_status.attr,
 #endif
 	&dev_attr_wakeup.attr,
 #ifdef CONFIG_PM_SLEEP
@@ -250,7 +264,6 @@ static struct attribute * power_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_runtime_usage.attr,
 	&dev_attr_runtime_active_kids.attr,
-	&dev_attr_runtime_status.attr,
 	&dev_attr_runtime_enabled.attr,
 #endif
 #endif
--
cgit v1.1

From 8d4b9d1bfef117862a2889dec4dac227068544c9 Mon Sep 17 00:00:00 2001
From: Arjan van de Ven
Date: Mon, 19 Jul 2010 02:01:06 +0200
Subject: PM / Runtime: Add runtime PM statistics (v3)

In order for PowerTOP to be able to report how well the new runtime PM is
working for the various drivers, the kernel needs to export some basic
statistics in sysfs.

This patch adds two sysfs files in the runtime PM domain that expose the
total time a device has been active, and the time a device has been
suspended.  With this, PowerTOP can compute the activity percentage

  Active %age = 100 * (delta active) / (delta active + delta suspended)

and present the information to the user.

I've written the PowerTOP code (slated for version 1.12) already, and the
output looks like this:

Runtime Device Power Management statistics
Active  Device name
 10.0%  06:00.0 Ethernet controller: Realtek Semiconductor Co., Ltd. RTL8101E/RTL8102E PCI Express Fast Ethernet controller

[version 2: fix stat update bugs noticed by Alan Stern]
[version 3: rebase to -next and move the sysfs declaration]

Signed-off-by: Arjan van de Ven
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/runtime.c | 54 ++++++++++++++++++++++++++++++++++++++------
 drivers/base/power/sysfs.c   | 30 ++++++++++++++++++++++++
 2 files changed, 77 insertions(+), 7 deletions(-)
(limited to 'drivers/base')

diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b0ec0e9..b78c401 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -123,6 +123,45 @@ int pm_runtime_idle(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_idle);

+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+	unsigned long now = jiffies;
+	int delta;
+
+	delta = now - dev->power.accounting_timestamp;
+
+	if (delta < 0)
+		delta = 0;
+
+	dev->power.accounting_timestamp = now;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		dev->power.suspended_jiffies += delta;
+	else
+		dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+	update_pm_runtime_accounting(dev);
+	dev->power.runtime_status = status;
+}
+
 /**
  * __pm_runtime_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
@@ -197,7 +236,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 		goto repeat;
 	}

-	dev->power.runtime_status = RPM_SUSPENDING;
+	__update_runtime_status(dev, RPM_SUSPENDING);
 	dev->power.deferred_resume = false;

 	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
@@ -228,7 +267,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	}

 	if (retval) {
-		dev->power.runtime_status = RPM_ACTIVE;
+		__update_runtime_status(dev, RPM_ACTIVE);
 		if (retval == -EAGAIN || retval == -EBUSY) {
 			if (dev->power.timer_expires == 0)
 				notify = true;
@@ -237,7 +276,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 			pm_runtime_cancel_pending(dev);
 		}
 	} else {
-		dev->power.runtime_status = RPM_SUSPENDED;
+		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);

 		if (dev->parent) {
@@ -381,7 +420,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}

-	dev->power.runtime_status = RPM_RESUMING;
+	__update_runtime_status(dev, RPM_RESUMING);

 	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
 		spin_unlock_irq(&dev->power.lock);
@@ -411,10 +450,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 	}

 	if (retval) {
-		dev->power.runtime_status = RPM_SUSPENDED;
+		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
 	} else {
-		dev->power.runtime_status = RPM_ACTIVE;
+		__update_runtime_status(dev, RPM_ACTIVE);
 		if (parent)
 			atomic_inc(&parent->power.child_count);
 	}
@@ -848,7 +887,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 	}

  out_set:
-	dev->power.runtime_status = status;
+	__update_runtime_status(dev, status);
 	dev->power.runtime_error = 0;
 out:
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1077,6 +1116,7 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
+	dev->power.accounting_timestamp = jiffies;
 	INIT_WORK(&dev->power.work, pm_runtime_work);

 	dev->power.timer_expires = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 1eca50c..e56b438 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 #include "power.h"

 /*
@@ -111,6 +112,33 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,

 static DEVICE_ATTR(control, 0644, control_show, control_store);

+static ssize_t rtpm_active_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+
+static ssize_t rtpm_suspended_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n",
+		jiffies_to_msecs(dev->power.suspended_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+
 static ssize_t rtpm_status_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -254,6 +282,8 @@ static struct attribute * power_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_control.attr,
 	&dev_attr_runtime_status.attr,
+	&dev_attr_runtime_suspended_time.attr,
+	&dev_attr_runtime_active_time.attr,
 #endif
 	&dev_attr_wakeup.attr,
 #ifdef CONFIG_PM_SLEEP
--
cgit v1.1
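To show how the two new attributes can be consumed, here is a hedged
user-space sketch of the computation from the changelog above; the device
path is only an example, both attributes report milliseconds, and a real
tool such as PowerTOP samples every device rather than one hard-coded
path:

#include <stdio.h>
#include <unistd.h>

/* Hypothetical example paths; both files are added by the patch above. */
#define ACTIVE_TIME    "/sys/devices/pci0000:00/0000:06:00.0/power/runtime_active_time"
#define SUSPENDED_TIME "/sys/devices/pci0000:00/0000:06:00.0/power/runtime_suspended_time"

static long read_ms(const char *path)
{
	long val = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	long a0 = read_ms(ACTIVE_TIME);
	long s0 = read_ms(SUSPENDED_TIME);

	sleep(5);		/* measurement interval */

	long da = read_ms(ACTIVE_TIME) - a0;
	long ds = read_ms(SUSPENDED_TIME) - s0;

	/* Active %age = 100 * (delta active) / (delta active + delta suspended) */
	if (da + ds > 0)
		printf("Active %%age = %.1f\n", 100.0 * (double)da / (double)(da + ds));

	return 0;
}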