path: root/sys/kern/subr_taskqueue.c
author    sam <sam@FreeBSD.org>  2003-09-05 23:09:22 +0000
committer sam <sam@FreeBSD.org>  2003-09-05 23:09:22 +0000
commit    546fd338df0b9b22bd11afd07c5e3e8735c09ab6 (patch)
tree      b2b8548c093d546ea8a2d30c93dafdef717cfee7 /sys/kern/subr_taskqueue.c
parent    27b68e0947c6cecbcd383634979d4f8c818c30ca (diff)
"fast swi" taskqueue support. This is a taskqueue that uses spinlocks
making it useful for dispatching swi tasks from fast interrupt handlers. Sponsered by: FreeBSD Foundation
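
Not part of this commit, but a minimal sketch of how a driver could consume the
new interface.  The foo_* names and the softc layout are hypothetical; only
TASK_INIT(), taskqueue_enqueue_fast() and the global taskqueue_fast come from
this change and <sys/taskqueue.h>:

#include <sys/param.h>
#include <sys/taskqueue.h>

struct foo_softc {
	struct task	sc_intr_task;	/* deferred interrupt work */
	/* ... device state ... */
};

/* Handler invoked later from the taskqueue SWI, not shown here. */
static void	foo_intr_task(void *context, int pending);

static void
foo_attach_tasks(struct foo_softc *sc)
{
	/* priority 0; the handler gets sc back as its context */
	TASK_INIT(&sc->sc_intr_task, 0, foo_intr_task, sc);
}

/* Fast (spinlock-only) interrupt handler: just hand the work off. */
static void
foo_fast_intr(void *arg)
{
	struct foo_softc *sc = arg;

	/* legal here because taskqueue_fast is protected by a spin mutex */
	taskqueue_enqueue_fast(taskqueue_fast, &sc->sc_intr_task);
}

Because the fast queue is protected by a spin mutex, the enqueue is safe from a
handler registered as a fast interrupt, where taking the regular (sleep) mutex
of the existing taskqueue_swi queues would not be.
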
Diffstat (limited to 'sys/kern/subr_taskqueue.c')
 -rw-r--r--  sys/kern/subr_taskqueue.c | 118
 1 file changed, 118 insertions(+), 0 deletions(-)
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index bb1f1b6..01b6733 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -272,3 +272,121 @@ TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
TASKQUEUE_DEFINE(thread, taskqueue_thread_enqueue, 0,
kthread_create(taskqueue_kthread, NULL,
&taskqueue_thread_proc, RFNOWAIT, 0, "taskqueue"));
+
+int
+taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
+{
+ struct task *ins;
+ struct task *prev;
+
+ mtx_lock_spin(&queue->tq_mutex);
+
+ /*
+ * Don't allow new tasks on a queue which is being freed.
+ */
+ if (queue->tq_draining) {
+ mtx_unlock_spin(&queue->tq_mutex);
+ return EPIPE;
+ }
+
+ /*
+ * Count multiple enqueues.
+ */
+ if (task->ta_pending) {
+ task->ta_pending++;
+ mtx_unlock_spin(&queue->tq_mutex);
+ return 0;
+ }
+
+ /*
+ * Optimise the case when all tasks have the same priority.
+ */
+ prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
+ if (!prev || prev->ta_priority >= task->ta_priority) {
+ STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
+ } else {
+ prev = 0;
+ for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
+ prev = ins, ins = STAILQ_NEXT(ins, ta_link))
+ if (ins->ta_priority < task->ta_priority)
+ break;
+
+ if (prev)
+ STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
+ else
+ STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
+ }
+
+ task->ta_pending = 1;
+ if (queue->tq_enqueue)
+ queue->tq_enqueue(queue->tq_context);
+
+ mtx_unlock_spin(&queue->tq_mutex);
+
+ return 0;
+}
+
+static void
+taskqueue_run_fast(struct taskqueue *queue)
+{
+ struct task *task;
+ int pending;
+
+ mtx_lock_spin(&queue->tq_mutex);
+ while (STAILQ_FIRST(&queue->tq_queue)) {
+ /*
+ * Carefully remove the first task from the queue and
+ * zero its pending count.
+ */
+ task = STAILQ_FIRST(&queue->tq_queue);
+ STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
+ pending = task->ta_pending;
+ task->ta_pending = 0;
+ mtx_unlock_spin(&queue->tq_mutex);
+
+ task->ta_func(task->ta_context, pending);
+
+ mtx_lock_spin(&queue->tq_mutex);
+ }
+ mtx_unlock_spin(&queue->tq_mutex);
+}
+
+struct taskqueue *taskqueue_fast;
+static void *taskqueue_fast_ih;
+
+static void
+taskqueue_fast_schedule(void *context)
+{
+ swi_sched(taskqueue_fast_ih, 0);
+}
+
+static void
+taskqueue_fast_run(void *dummy)
+{
+ taskqueue_run_fast(taskqueue_fast);
+}
+
+static void
+taskqueue_define_fast(void *arg)
+{
+ taskqueue_fast = malloc(sizeof(struct taskqueue),
+ M_TASKQUEUE, M_NOWAIT | M_ZERO);
+ if (!taskqueue_fast) {
+ printf("%s: Unable to allocate fast task queue!\n", __func__);
+ return;
+ }
+
+ STAILQ_INIT(&taskqueue_fast->tq_queue);
+ taskqueue_fast->tq_name = "fast";
+ taskqueue_fast->tq_enqueue = taskqueue_fast_schedule;
+ mtx_init(&taskqueue_fast->tq_mutex, "taskqueue", NULL, MTX_SPIN);
+
+ mtx_lock(&taskqueue_queues_mutex);
+ STAILQ_INSERT_TAIL(&taskqueue_queues, taskqueue_fast, tq_link);
+ mtx_unlock(&taskqueue_queues_mutex);
+
+ swi_add(NULL, "Fast task queue", taskqueue_fast_run,
+ NULL, SWI_TQ_FAST, 0, &taskqueue_fast_ih);
+}
+SYSINIT(taskqueue_fast, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
+ taskqueue_define_fast, NULL);
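
A note on the design, with a hedged sketch of the handler side (the foo_* names
are hypothetical): tq_mutex is initialized with MTX_SPIN so that
taskqueue_enqueue_fast() may run from a fast interrupt handler, and
taskqueue_run_fast() drops that spin lock around the ta_func call, so the
handler itself runs from the "Fast task queue" SWI without tq_mutex held and
receives the pending count accumulated since it last ran:

/* Hypothetical ta_func: invoked from the "Fast task queue" SWI. */
static void
foo_intr_task(void *context, int pending)
{
	struct foo_softc *sc = context;

	/* tq_mutex is not held here; "pending" counts coalesced enqueues */
	foo_process_events(sc, pending);
}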