author    Oleg Nesterov <oleg@tv-sign.ru>    2007-05-09 02:34:17 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-09 12:30:52 -0700
commit    a848e3b67c07ed79374bd0f9b82f9ce45a419643 (patch)
tree      72f4385e12310bf8e1b959d4d90d32f05c10cd61    /kernel/workqueue.c
parent    63bc0362521cbaae3ed17b8de7b094f9492453f0 (diff)
workqueue: introduce wq_per_cpu() helper
Cleanup. A number of per_cpu_ptr(wq->cpu_wq, cpu) users have to check
that cpu is valid for this wq. Make a simple helper.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
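To make the intent of the helper concrete, here is a minimal user-space sketch of the same pattern. Everything in it (toy_wq, toy_cwq, the NR_CPUS value, the choice of slot 0) is illustrative and assumed, not taken from the kernel; it only shows how the repeated "single-threaded workqueues use one fixed CPU slot" check collapses into a single helper, which is what wq_per_cpu() does in the patch below using per_cpu_ptr(wq->cpu_wq, cpu) and singlethread_cpu.

/*
 * Illustrative sketch only -- NOT kernel code.  Types and names here are
 * invented stand-ins for the structures touched by the patch.
 */
#include <stdio.h>

#define NR_CPUS 4

struct toy_cwq { int queued; };          /* stand-in for cpu_workqueue_struct */

struct toy_wq {
        int single_threaded;             /* stand-in for is_single_threaded(wq) */
        struct toy_cwq cpu_wq[NR_CPUS];  /* stand-in for the per-CPU array */
};

/* analogous to wq_per_cpu(): map (wq, cpu) to the right per-CPU slot */
static struct toy_cwq *toy_wq_per_cpu(struct toy_wq *wq, int cpu)
{
        if (wq->single_threaded)
                cpu = 0;                 /* stand-in for singlethread_cpu */
        return &wq->cpu_wq[cpu];
}

int main(void)
{
        struct toy_wq wq = { .single_threaded = 1 };

        /* callers no longer repeat the single-threaded check themselves */
        toy_wq_per_cpu(&wq, 3)->queued++;
        printf("slot 0 queued %d time(s)\n", wq.cpu_wq[0].queued);
        return 0;
}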
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0eb9b33..985902e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -86,6 +86,14 @@ static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
                 ? &cpu_singlethread_map : &cpu_populated_map;
 }
 
+static
+struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
+{
+        if (unlikely(is_single_threaded(wq)))
+                cpu = singlethread_cpu;
+        return per_cpu_ptr(wq->cpu_wq, cpu);
+}
+
 /*
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
@@ -142,16 +150,14 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  */
 int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-        int ret = 0, cpu = get_cpu();
+        int ret = 0;
 
         if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
-                if (unlikely(is_single_threaded(wq)))
-                        cpu = singlethread_cpu;
                 BUG_ON(!list_empty(&work->entry));
-                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+                __queue_work(wq_per_cpu(wq, get_cpu()), work);
+                put_cpu();
                 ret = 1;
         }
-        put_cpu();
         return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work);
@@ -161,12 +167,8 @@ void delayed_work_timer_fn(unsigned long __data)
         struct delayed_work *dwork = (struct delayed_work *)__data;
         struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
         struct workqueue_struct *wq = cwq->wq;
-        int cpu = smp_processor_id();
-
-        if (unlikely(is_single_threaded(wq)))
-                cpu = singlethread_cpu;
 
-        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
+        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
 }
 
 /**
@@ -209,9 +211,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                 BUG_ON(!list_empty(&work->entry));
 
                 /* This stores cwq for the moment, for the timer_fn */
-                set_wq_data(work,
-                        per_cpu_ptr(wq->cpu_wq, wq->singlethread ?
-                                singlethread_cpu : raw_smp_processor_id()));
+                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                 timer->expires = jiffies + delay;
                 timer->data = (unsigned long)dwork;
                 timer->function = delayed_work_timer_fn;