author		sam <sam@FreeBSD.org>	2003-09-05 23:09:22 +0000
committer	sam <sam@FreeBSD.org>	2003-09-05 23:09:22 +0000
commit		546fd338df0b9b22bd11afd07c5e3e8735c09ab6 (patch)
tree		b2b8548c093d546ea8a2d30c93dafdef717cfee7
parent		27b68e0947c6cecbcd383634979d4f8c818c30ca (diff)
"fast swi" taskqueue support. This is a taskqueue that uses spinlocks
making it useful for dispatching swi tasks from fast interrupt handlers. Sponsered by: FreeBSD Foundation
-rw-r--r--	sys/kern/subr_taskqueue.c	118
-rw-r--r--	sys/sys/interrupt.h		7
-rw-r--r--	sys/sys/taskqueue.h		9
3 files changed, 131 insertions, 3 deletions
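
For context, a minimal sketch of how a driver might consume the new interface from a fast interrupt handler. The foo_* names are hypothetical, and the task is assumed to have been set up with TASK_INIT() during attach:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

static struct task foo_task;		/* hypothetical driver task */

static void
foo_task_handler(void *context, int pending)
{
	/* Deferred work runs here later, in swi context. */
}

static void
foo_fast_intr(void *arg)
{
	/*
	 * A fast interrupt handler may not acquire sleep mutexes, so
	 * the regular swi taskqueues are off limits here; the
	 * spinlock-guarded fast queue is the safe hand-off.
	 */
	taskqueue_enqueue_fast(taskqueue_fast, &foo_task);
}
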
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index bb1f1b6..01b6733 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -272,3 +272,121 @@ TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
TASKQUEUE_DEFINE(thread, taskqueue_thread_enqueue, 0,
kthread_create(taskqueue_kthread, NULL,
&taskqueue_thread_proc, RFNOWAIT, 0, "taskqueue"));
+
+int
+taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
+{
+ struct task *ins;
+ struct task *prev;
+
+ mtx_lock_spin(&queue->tq_mutex);
+
+ /*
+ * Don't allow new tasks on a queue which is being freed.
+ */
+ if (queue->tq_draining) {
+ mtx_unlock_spin(&queue->tq_mutex);
+ return EPIPE;
+ }
+
+ /*
+ * Count multiple enqueues.
+ */
+ if (task->ta_pending) {
+ task->ta_pending++;
+ mtx_unlock_spin(&queue->tq_mutex);
+ return 0;
+ }
+
+ /*
+ * Optimise the case when all tasks have the same priority.
+ */
+ prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
+ if (!prev || prev->ta_priority >= task->ta_priority) {
+ STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
+ } else {
+ prev = 0;
+ for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
+ prev = ins, ins = STAILQ_NEXT(ins, ta_link))
+ if (ins->ta_priority < task->ta_priority)
+ break;
+
+ if (prev)
+ STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
+ else
+ STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
+ }
+
+ task->ta_pending = 1;
+ if (queue->tq_enqueue)
+ queue->tq_enqueue(queue->tq_context);
+
+ mtx_unlock_spin(&queue->tq_mutex);
+
+ return 0;
+}
+
+static void
+taskqueue_run_fast(struct taskqueue *queue)
+{
+ struct task *task;
+ int pending;
+
+ mtx_lock_spin(&queue->tq_mutex);
+ while (STAILQ_FIRST(&queue->tq_queue)) {
+ /*
+ * Carefully remove the first task from the queue and
+ * zero its pending count.
+ */
+ task = STAILQ_FIRST(&queue->tq_queue);
+ STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
+ pending = task->ta_pending;
+ task->ta_pending = 0;
+ mtx_unlock_spin(&queue->tq_mutex);
+
+ task->ta_func(task->ta_context, pending);
+
+ mtx_lock_spin(&queue->tq_mutex);
+ }
+ mtx_unlock_spin(&queue->tq_mutex);
+}
+
+struct taskqueue *taskqueue_fast;
+static void *taskqueue_fast_ih;
+
+static void
+taskqueue_fast_schedule(void *context)
+{
+ swi_sched(taskqueue_fast_ih, 0);
+}
+
+static void
+taskqueue_fast_run(void *dummy)
+{
+ taskqueue_run_fast(taskqueue_fast);
+}
+
+static void
+taskqueue_define_fast(void *arg)
+{
+ taskqueue_fast = malloc(sizeof(struct taskqueue),
+ M_TASKQUEUE, M_NOWAIT | M_ZERO);
+ if (!taskqueue_fast) {
+ printf("%s: Unable to allocate fast task queue!\n", __func__);
+ return;
+ }
+
+ STAILQ_INIT(&taskqueue_fast->tq_queue);
+ taskqueue_fast->tq_name = "fast";
+ taskqueue_fast->tq_enqueue = taskqueue_fast_schedule;
+ mtx_init(&taskqueue_fast->tq_mutex, "taskqueue", NULL, MTX_SPIN);
+
+ mtx_lock(&taskqueue_queues_mutex);
+ STAILQ_INSERT_TAIL(&taskqueue_queues, taskqueue_fast, tq_link);
+ mtx_unlock(&taskqueue_queues_mutex);
+
+ swi_add(NULL, "Fast task queue", taskqueue_fast_run,
+ NULL, SWI_TQ_FAST, 0, &taskqueue_fast_ih);
+}
+SYSINIT(taskqueue_fast, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
+ taskqueue_define_fast, NULL);
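
A note on the coalescing behaviour above: taskqueue_enqueue_fast() only bumps ta_pending for a task that is already queued, and taskqueue_run_fast() hands that accumulated count to ta_func. A hedged handler sketch (the foo_* helpers are hypothetical) that honours those semantics:

static void
foo_rx_task(void *context, int pending)
{
	/*
	 * 'pending' counts enqueues coalesced since this task last
	 * ran, so drain everything accumulated rather than assuming
	 * a single event per invocation.
	 */
	while (foo_rx_work_pending(context))	/* hypothetical helper */
		foo_rx_process_one(context);	/* hypothetical helper */
}
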
diff --git a/sys/sys/interrupt.h b/sys/sys/interrupt.h
index a48efb7..58a9dd0 100644
--- a/sys/sys/interrupt.h
+++ b/sys/sys/interrupt.h
@@ -94,9 +94,10 @@ struct ithd {
#define SWI_CAMNET 2
#define SWI_CAMBIO 3
#define SWI_VM 4
-#define SWI_TQ_GIANT 5
-#define SWI_TQ 6
-#define SWI_CLOCK 7
+#define SWI_TQ_FAST 5
+#define SWI_TQ_GIANT 6
+#define SWI_TQ 7
+#define SWI_CLOCK 8
extern struct ithd *tty_ithd;
extern struct ithd *clk_ithd;
diff --git a/sys/sys/taskqueue.h b/sys/sys/taskqueue.h
index d0cac11..9695b35 100644
--- a/sys/sys/taskqueue.h
+++ b/sys/sys/taskqueue.h
@@ -120,4 +120,13 @@ TASKQUEUE_DECLARE(swi);
*/
TASKQUEUE_DECLARE(thread);
+/*
+ * Queue for swi handlers dispatched from fast interrupt handlers.
+ * These are necessarily different from the above because the queue
+ * must be locked with spinlocks since sleep mutexes cannot be used
+ * from a fast interrupt handler context.
+ */
+TASKQUEUE_DECLARE(fast);
+int taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task);
+
#endif /* !_SYS_TASKQUEUE_H_ */
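
Since taskqueue_fast is created by a SYSINIT at SI_SUB_CONFIGURE/SI_ORDER_SECOND (see the subr_taskqueue.c hunk above), boot-time consumers must only touch it after that point. A minimal sketch, assuming the hypothetical foo_task and foo_task_handler from the earlier example:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>

static void
foo_late_init(void *arg)
{
	/*
	 * SI_ORDER_ANY runs after SI_ORDER_SECOND within the same
	 * subsystem, so taskqueue_fast is valid by now.
	 */
	TASK_INIT(&foo_task, 0, foo_task_handler, NULL);
}
SYSINIT(foo_late_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, foo_late_init, NULL);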