| author | Oleg Nesterov <oleg@tv-sign.ru> | 2007-05-09 02:34:10 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 12:30:52 -0700 |
| commit | 7097a87afe937a5879528d52880c2d95f089e96c (patch) | |
| tree | f06090c0f6ed327ee2894deb8ac7c588ab55bf4e /kernel | |
| parent | 3af24433efac62f451bfdb1cf1edb7181fb73645 (diff) | |
| download | op-kernel-dev-7097a87afe937a5879528d52880c2d95f089e96c.zip, op-kernel-dev-7097a87afe937a5879528d52880c2d95f089e96c.tar.gz | |
workqueue: kill run_scheduled_work()
Kill it because it has no callers.
Actually, I think the whole idea of run_scheduled_work() was not right: it is not good to mix "unqueue this work" and "execute its ->func()" in one function.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
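To make the objection concrete, here is a hedged sketch of what a caller of the removed interface would have looked like; the `example_*` names are hypothetical and are not taken from this patch, only the run_scheduled_work() signature is (it is visible in the diff below).

```c
/*
 * Hypothetical caller of the now-removed interface, for illustration only.
 * run_scheduled_work() both dequeued a pending work item and invoked its
 * ->func() directly in the caller's context, returning 1 if it ran anything.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

static void example_handler(struct work_struct *work)
{
	/* the deferred action would go here */
}

static DECLARE_WORK(example_work, example_handler);

static void example_teardown(void)
{
	if (!run_scheduled_work(&example_work))
		printk(KERN_DEBUG "example_work was not pending here\n");
	/*
	 * At this point the handler has either run above or was never queued,
	 * unless it is executing concurrently on another CPU right now -- a
	 * case the interface could not rule out, per the comment in the
	 * removed __run_work() below.
	 */
}
```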
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 73 |
1 file changed, 0 insertions(+), 73 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a981add..ea42225 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -98,79 +98,6 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
-static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cwq->lock, flags);
-	/*
-	 * We need to re-validate the work info after we've gotten
-	 * the cpu_workqueue lock. We can run the work now iff:
-	 *
-	 *  - the wq_data still matches the cpu_workqueue_struct
-	 *  - AND the work is still marked pending
-	 *  - AND the work is still on a list (which will be this
-	 *    workqueue_struct list)
-	 *
-	 * All these conditions are important, because we
-	 * need to protect against the work being run right
-	 * now on another CPU (all but the last one might be
-	 * true if it's currently running and has not been
-	 * released yet, for example).
-	 */
-	if (get_wq_data(work) == cwq
-	    && work_pending(work)
-	    && !list_empty(&work->entry)) {
-		work_func_t f = work->func;
-		cwq->current_work = work;
-		list_del_init(&work->entry);
-		spin_unlock_irqrestore(&cwq->lock, flags);
-
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
-			work_release(work);
-		f(work);
-
-		spin_lock_irqsave(&cwq->lock, flags);
-		cwq->current_work = NULL;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&cwq->lock, flags);
-	return ret;
-}
-
-/**
- * run_scheduled_work - run scheduled work synchronously
- * @work: work to run
- *
- * This checks if the work was pending, and runs it
- * synchronously if so. It returns a boolean to indicate
- * whether it had any scheduled work to run or not.
- *
- * NOTE! This _only_ works for normal work_structs. You
- * CANNOT use this for delayed work, because the wq data
- * for delayed work will not point properly to the per-
- * CPU workqueue struct, but will change!
- */
-int fastcall run_scheduled_work(struct work_struct *work)
-{
-	for (;;) {
-		struct cpu_workqueue_struct *cwq;
-
-		if (!work_pending(work))
-			return 0;
-		if (list_empty(&work->entry))
-			return 0;
-		/* NOTE! This depends intimately on __queue_work! */
-		cwq = get_wq_data(work);
-		if (!cwq)
-			return 0;
-		if (__run_work(cwq, work))
-			return 1;
-	}
-}
-EXPORT_SYMBOL(run_scheduled_work);
-
 static void insert_work(struct cpu_workqueue_struct *cwq,
 				struct work_struct *work, int tail)
 {
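For comparison, the conventional way for a caller to make sure previously scheduled work has finished is to flush the queue it was submitted to rather than to run the item in place. The minimal sketch below is not part of this patch and is not a replacement the commit itself names; the `example_*` identifiers are hypothetical, while schedule_work() and flush_scheduled_work() are the real APIs for the shared ("events") workqueue of this era.

```c
/*
 * Minimal sketch of the flush-based pattern; illustrative only.
 * example_work/example_handler are the same hypothetical names used in the
 * earlier sketch above.
 */
#include <linux/workqueue.h>

static void example_handler(struct work_struct *work)
{
	/* the deferred action would go here */
}

static DECLARE_WORK(example_work, example_handler);

static void example_sync_point(void)
{
	schedule_work(&example_work);	/* queue on the shared workqueue */
	flush_scheduled_work();		/* returns only after example_handler
					 * (and anything else queued there)
					 * has completed */
}
```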