author     mdf <mdf@FreeBSD.org>  2010-07-22 16:41:09 +0000
committer  mdf <mdf@FreeBSD.org>  2010-07-22 16:41:09 +0000
commit     fa23fa820ab654dffda5fdb3a938f75cf77b3646 (patch)
tree       3a37e0a8990ba91718b1b54ff6cb60d91c9caa49 /sys/kern
parent     f6fa9f4a32c433b96c8e6546a8d2f762455f9055 (diff)
download   FreeBSD-src-fa23fa820ab654dffda5fdb3a938f75cf77b3646.zip
           FreeBSD-src-fa23fa820ab654dffda5fdb3a938f75cf77b3646.tar.gz
Fix taskqueue_drain(9) to not have false negatives. For threaded
taskqueues, more than one task can be running simultaneously.

Also make taskqueue_run(9) static to the file, since there are no
consumers in the base kernel and the function signature needs to
change with this fix.

Remove mention of taskqueue_run(9) and taskqueue_run_fast(9) from
the taskqueue(9) man page.

Reviewed by:	jhb
Approved by:	zml (mentor)
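The behaviour being fixed is easiest to see from the consumer side: with a taskqueue served by several threads, taskqueue_drain(9) must not return while any of those threads is still executing the task. Below is a minimal sketch of that usage; it is not part of this commit, and the queue name, thread count, and callback are illustrative only.

/* Hypothetical taskqueue(9) consumer; all names here are illustrative. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>

static struct taskqueue *example_tq;
static struct task example_task;

static void
example_task_fn(void *context, int pending)
{
	/* Work that may run concurrently with other tasks on the queue. */
}

static void
example_setup_and_drain(void)
{
	/* A taskqueue served by two kernel threads. */
	example_tq = taskqueue_create("example", M_WAITOK,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 2, PWAIT, "example taskq");

	TASK_INIT(&example_task, 0, example_task_fn, NULL);
	taskqueue_enqueue(example_tq, &example_task);

	/*
	 * With this fix, taskqueue_drain() returns only once the task is
	 * neither pending nor running on any of the queue's threads, so
	 * it is safe afterwards to free anything the task references.
	 */
	taskqueue_drain(example_tq, &example_task);
	taskqueue_free(example_tq);
}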
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/subr_taskqueue.c   61
1 file changed, 38 insertions, 23 deletions
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index 205a23a..6ccf143 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -63,6 +63,8 @@ struct taskqueue {
#define TQ_FLAGS_BLOCKED (1 << 1)
#define TQ_FLAGS_PENDING (1 << 2)
+static void taskqueue_run(struct taskqueue *, struct task **);
+
static __inline void
TQ_LOCK(struct taskqueue *tq)
{
@@ -139,7 +141,7 @@ taskqueue_free(struct taskqueue *queue)
TQ_LOCK(queue);
queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
- taskqueue_run(queue);
+ taskqueue_run(queue, &queue->tq_running);
taskqueue_terminate(queue->tq_threads, queue);
mtx_destroy(&queue->tq_mutex);
free(queue->tq_threads, M_TASKQUEUE);
@@ -215,15 +217,14 @@ taskqueue_unblock(struct taskqueue *queue)
TQ_UNLOCK(queue);
}
-void
-taskqueue_run(struct taskqueue *queue)
+static void
+taskqueue_run(struct taskqueue *queue, struct task **tpp)
{
- struct task *task;
- int owned, pending;
+ struct task *task, *running;
+ int pending;
- owned = mtx_owned(&queue->tq_mutex);
- if (!owned)
- TQ_LOCK(queue);
+ mtx_assert(&queue->tq_mutex, MA_OWNED);
+ running = NULL;
while (STAILQ_FIRST(&queue->tq_queue)) {
/*
* Carefully remove the first task from the queue and
@@ -233,22 +234,16 @@ taskqueue_run(struct taskqueue *queue)
STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
pending = task->ta_pending;
task->ta_pending = 0;
- queue->tq_running = task;
+ task->ta_running = tpp;
+ *tpp = task;
TQ_UNLOCK(queue);
task->ta_func(task->ta_context, pending);
TQ_LOCK(queue);
- queue->tq_running = NULL;
+ *tpp = NULL;
wakeup(task);
}
-
- /*
- * For compatibility, unlock on return if the queue was not locked
- * on entry, although this opens a race window.
- */
- if (!owned)
- TQ_UNLOCK(queue);
}
void
@@ -256,15 +251,19 @@ taskqueue_drain(struct taskqueue *queue, struct task *task)
{
if (queue->tq_spin) { /* XXX */
mtx_lock_spin(&queue->tq_mutex);
- while (task->ta_pending != 0 || task == queue->tq_running)
+ while (task->ta_pending != 0 ||
+ (task->ta_running != NULL && task == *task->ta_running)) {
msleep_spin(task, &queue->tq_mutex, "-", 0);
+ }
mtx_unlock_spin(&queue->tq_mutex);
} else {
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
mtx_lock(&queue->tq_mutex);
- while (task->ta_pending != 0 || task == queue->tq_running)
+ while (task->ta_pending != 0 ||
+ (task->ta_running != NULL && task == *task->ta_running)) {
msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
+ }
mtx_unlock(&queue->tq_mutex);
}
}
@@ -278,7 +277,9 @@ taskqueue_swi_enqueue(void *context)
static void
taskqueue_swi_run(void *dummy)
{
- taskqueue_run(taskqueue_swi);
+ TQ_LOCK(taskqueue_swi);
+ taskqueue_run(taskqueue_swi, &taskqueue_swi->tq_running);
+ TQ_UNLOCK(taskqueue_swi);
}
static void
@@ -290,7 +291,9 @@ taskqueue_swi_giant_enqueue(void *context)
static void
taskqueue_swi_giant_run(void *dummy)
{
- taskqueue_run(taskqueue_swi_giant);
+ TQ_LOCK(taskqueue_swi_giant);
+ taskqueue_run(taskqueue_swi_giant, &taskqueue_swi_giant->tq_running);
+ TQ_UNLOCK(taskqueue_swi_giant);
}
int
@@ -352,12 +355,22 @@ void
taskqueue_thread_loop(void *arg)
{
struct taskqueue **tqp, *tq;
+ struct task *running;
+
+ /*
+ * The kernel stack space is globally addressable, and it would
+ * be an error to ask whether a task is running after the
+ * taskqueue has been released. So it is safe to have the
+ * task point back to an address in the taskqueue's stack to
+ * determine if the task is running.
+ */
+ running = NULL;
tqp = arg;
tq = *tqp;
TQ_LOCK(tq);
while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
- taskqueue_run(tq);
+ taskqueue_run(tq, &running);
/*
* Because taskqueue_run() can drop tq_mutex, we need to
* check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
@@ -423,7 +436,9 @@ taskqueue_fast_enqueue(void *context)
static void
taskqueue_fast_run(void *dummy)
{
- taskqueue_run(taskqueue_fast);
+ TQ_LOCK(taskqueue_fast);
+ taskqueue_run(taskqueue_fast, &taskqueue_fast->tq_running);
+ TQ_UNLOCK(taskqueue_fast);
}
TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
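The heart of the change is that each thread servicing the queue owns its own "currently running" slot (a stack variable in taskqueue_thread_loop(), or tq_running for the software-interrupt queues), and a task records a back-pointer to whichever slot it is running from. taskqueue_drain() then inspects the slot the task points at rather than a single queue-wide field, so it can no longer return early while another thread is still executing the task. The following userspace sketch, with hypothetical names and pthreads standing in for the kernel mutex and sleep primitives, illustrates the pattern; it is an analogue for explanation only, not kernel code.

/* Simplified userspace analogue of the per-thread "running" back-pointer. */
#include <pthread.h>
#include <stddef.h>

struct task {
	void		(*ta_func)(void *);
	void		 *ta_context;
	int		  ta_pending;
	struct task	**ta_running;	/* slot of the thread running this task */
	struct task	 *ta_next;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cv = PTHREAD_COND_INITIALIZER;
static struct task *q_head;

/* Each worker passes the address of a slot local to its own stack. */
static void
run_tasks(struct task **slot)
{
	struct task *t;

	pthread_mutex_lock(&q_lock);
	while ((t = q_head) != NULL) {
		q_head = t->ta_next;
		t->ta_pending = 0;
		t->ta_running = slot;	/* back-pointer into this worker */
		*slot = t;
		pthread_mutex_unlock(&q_lock);
		t->ta_func(t->ta_context);
		pthread_mutex_lock(&q_lock);
		*slot = NULL;
		pthread_cond_broadcast(&q_cv);	/* wake any drainers */
	}
	pthread_mutex_unlock(&q_lock);
}

static void *
worker(void *arg)
{
	struct task *running = NULL;	/* lives on this worker's stack */

	(void)arg;
	run_tasks(&running);
	return (NULL);
}

/* Wait until the task is neither queued nor running on any worker. */
static void
drain_task(struct task *t)
{
	pthread_mutex_lock(&q_lock);
	while (t->ta_pending != 0 ||
	    (t->ta_running != NULL && *t->ta_running == t))
		pthread_cond_wait(&q_cv, &q_lock);
	pthread_mutex_unlock(&q_lock);
}

As in the kernel version, the slot must remain addressable for as long as a drainer can look at it; the comment added in the diff above makes the same point, since a kernel thread's stack stays valid while the taskqueue exists, and asking whether a task is running after the taskqueue has been released is already a caller error.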