author | SungEun Kim <cleaneye.kim@lge.com> | 2015-07-03 15:57:20 +0900
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-07-14 21:04:48 +0200
commit | 6ce12a977b7e484540482febe47d1e65f7427abf (patch)
tree | 2f8abc2e11c35e0c399dd1b1701564853eb10f55 /kernel/power
parent | bc0195aad0daa2ad5b0d76cce22b167bc3435590 (diff)
PM / autosleep: Use workqueue for user space wakeup sources garbage collector
The synchronous synchronize_rcu() in wakeup_source_remove() can
sometimes block the user-space process that writes to
/sys/kernel/wake_unlock. For example, when the Android EventHub tries
to release a wakelock, this blocking can occur and EventHub cannot
receive input events for a while.

Using a work item instead of a direct function call in
pm_wake_unlock() prevents this unnecessary delay from happening.
Signed-off-by: SungEun Kim <cleaneye.kim@lge.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'kernel/power')
-rw-r--r-- | kernel/power/wakelock.c | 18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 019069c..1896386 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -17,6 +17,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 #include "power.h"
 
@@ -83,7 +84,9 @@ static inline void decrement_wakelocks_number(void) {}
 #define WL_GC_COUNT_MAX		100
 #define WL_GC_TIME_SEC		300
 
+static void __wakelocks_gc(struct work_struct *work);
 static LIST_HEAD(wakelocks_lru_list);
+static DECLARE_WORK(wakelock_work, __wakelocks_gc);
 static unsigned int wakelocks_gc_count;
 
 static inline void wakelocks_lru_add(struct wakelock *wl)
@@ -96,13 +99,12 @@ static inline void wakelocks_lru_most_recent(struct wakelock *wl)
 	list_move(&wl->lru, &wakelocks_lru_list);
 }
 
-static void wakelocks_gc(void)
+static void __wakelocks_gc(struct work_struct *work)
 {
 	struct wakelock *wl, *aux;
 	ktime_t now;
 
-	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
-		return;
+	mutex_lock(&wakelocks_lock);
 
 	now = ktime_get();
 	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
@@ -127,6 +129,16 @@ static void wakelocks_gc(void)
 		}
 	}
 	wakelocks_gc_count = 0;
+
+	mutex_unlock(&wakelocks_lock);
+}
+
+static void wakelocks_gc(void)
+{
+	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
+		return;
+
+	schedule_work(&wakelock_work);
 }
 #else /* !CONFIG_PM_WAKELOCKS_GC */
 static inline void wakelocks_lru_add(struct wakelock *wl) {}
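For context, the change applies the standard workqueue-deferral idiom: the fast path (the write to /sys/kernel/wake_unlock) only queues a work item, and the slow operation (the garbage collection that ends up waiting in synchronize_rcu()) runs later in process context on a kworker. The following is a minimal sketch of that idiom, not the patch itself; the demo_* names are hypothetical and only the kernel APIs (DEFINE_MUTEX, DECLARE_WORK, schedule_work) are real.

/*
 * Sketch of the deferral pattern used by the patch. The fast path must
 * not block, so it only schedules work; the blocking cleanup runs later
 * on the system workqueue. All demo_* identifiers are illustrative.
 */
#include <linux/workqueue.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static void demo_gc(struct work_struct *work)
{
	/* Runs in process context on a kworker, so blocking is fine. */
	mutex_lock(&demo_lock);
	/* ... slow cleanup, e.g. something that calls synchronize_rcu() ... */
	mutex_unlock(&demo_lock);
}

static DECLARE_WORK(demo_gc_work, demo_gc);

static void demo_fast_path(void)
{
	/* Must return quickly: hand the cleanup off to the workqueue. */
	schedule_work(&demo_gc_work);
}

The trade-off, as in the patch, is that the cleanup becomes asynchronous: the writer returns immediately and the garbage collection happens shortly afterwards, which is acceptable here because nothing depends on it completing synchronously.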