author		mav <mav@FreeBSD.org>	2018-02-02 18:02:06 +0000
committer	mav <mav@FreeBSD.org>	2018-02-02 18:02:06 +0000
commit		e6ad539d7575f723818b94d59c72377497425a11 (patch)
tree		72cf02b13ce5849eeb47312061c46bcf10b3e2fa
parent		48ada52ce53aebd93f69bb55cc403bae9e678fd7 (diff)
MFC r307566 (by sbruno):

Ensure that tasks with a specific cpu set prior to smp starting get
re-attached to a thread running on that cpu.

ref: https://github.com/NextBSD/NextBSD/commit/fcc20e306bc93ebbbe51f3775d1afb527970a2e9
-rw-r--r--	sys/kern/subr_gtaskqueue.c	128
1 file changed, 85 insertions(+), 43 deletions(-)
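
For context, the scenario this change addresses: a driver can attach a group
task to a specific CPU during early boot, before smp_started, when the group
has only its boot-time queue; the task must later be re-bound to a thread on
the requested CPU once the group grows. A minimal sketch of such an early
attach follows. taskqgroup_attach_cpu() and GROUPTASK_INIT() are the real API
(signatures as in this file and sys/sys/gtaskqueue.h); qgroup_example,
example_handler, and example_attach are hypothetical names for illustration.

#include <sys/param.h>
#include <sys/gtaskqueue.h>

static struct taskqgroup *qgroup_example;	/* group created elsewhere */
static struct grouptask example_gt;

static void
example_handler(void *softc)
{
	/* Per-CPU deferred work for the device would run here. */
}

static void
example_attach(void *softc, int cpu, int irq)
{
	GROUPTASK_INIT(&example_gt, 0, example_handler, softc);
	/*
	 * Before smp_started only the boot-time queue exists; the task
	 * queues there and is re-bound once the group is adjusted.
	 */
	taskqgroup_attach_cpu(qgroup_example, &example_gt, softc,
	    cpu, irq, "example");
}
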
diff --git a/sys/kern/subr_gtaskqueue.c b/sys/kern/subr_gtaskqueue.c
index 0518be9..115d834 100644
--- a/sys/kern/subr_gtaskqueue.c
+++ b/sys/kern/subr_gtaskqueue.c
@@ -555,7 +555,7 @@ struct taskq_bind_task {
};
static void
-taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
+taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
{
struct taskqgroup_cpu *qcpu;
@@ -565,7 +565,7 @@ taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
taskqueue_thread_enqueue, &qcpu->tgc_taskq);
gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
"%s_%d", qgroup->tqg_name, idx);
- qcpu->tgc_cpu = idx * qgroup->tqg_stride;
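+ /* The caller now picks the CPU, walking CPU_NEXT() in
+  * stride-sized steps, instead of assuming idx * stride. */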
+ qcpu->tgc_cpu = cpu;
}
static void
@@ -634,8 +634,8 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
qgroup->tqg_queue[qid].tgc_cnt++;
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
- gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
if (irq != -1 && smp_started) {
+ gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
CPU_ZERO(&mask);
CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
mtx_unlock(&qgroup->tqg_lock);
@@ -644,6 +644,32 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
mtx_unlock(&qgroup->tqg_lock);
}
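+/*
+ * Re-attach a deferred task during group adjustment: resolve its queue
+ * by uniq token and, if it has an interrupt, push the IRQ affinity to
+ * that queue's CPU.
+ */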
+static void
+taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+ cpuset_t mask;
+ int qid, cpu;
+
+ mtx_lock(&qgroup->tqg_lock);
+ qid = taskqgroup_find(qgroup, gtask->gt_uniq);
+ cpu = qgroup->tqg_queue[qid].tgc_cpu;
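+ /* Set the IRQ affinity with the group lock dropped, as taskqgroup_attach() does. */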
+ if (gtask->gt_irq != -1) {
+ mtx_unlock(&qgroup->tqg_lock);
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+ intr_setaffinity(gtask->gt_irq, &mask);
+
+ mtx_lock(&qgroup->tqg_lock);
+ }
+ qgroup->tqg_queue[qid].tgc_cnt++;
+
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
+ gt_list);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+ mtx_unlock(&qgroup->tqg_lock);
+}
+
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
void *uniq, int cpu, int irq, char *name)
@@ -672,13 +698,46 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
qgroup->tqg_queue[qid].tgc_cnt++;
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
- if (irq != -1 && smp_started) {
- CPU_ZERO(&mask);
- CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
- mtx_unlock(&qgroup->tqg_lock);
+ cpu = qgroup->tqg_queue[qid].tgc_cpu;
+ mtx_unlock(&qgroup->tqg_lock);
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+ if (irq != -1 && smp_started)
intr_setaffinity(irq, &mask);
- } else
+ return (0);
+}
+
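+/*
+ * Re-attach a task that was bound to a specific CPU before SMP started;
+ * requires smp_started and fails with EINVAL if no queue in the group
+ * runs on the requested CPU.
+ */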
+static int
+taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+ cpuset_t mask;
+ int i, qid, irq, cpu;
+
+ qid = -1;
+ irq = gtask->gt_irq;
+ cpu = gtask->gt_cpu;
+ MPASS(smp_started);
+ mtx_lock(&qgroup->tqg_lock);
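+ /* Find the queue whose thread runs on the requested CPU. */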
+ for (i = 0; i < qgroup->tqg_cnt; i++)
+ if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
+ qid = i;
+ break;
+ }
+ if (qid == -1) {
mtx_unlock(&qgroup->tqg_lock);
+ return (EINVAL);
+ }
+ qgroup->tqg_queue[qid].tgc_cnt++;
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+ mtx_unlock(&qgroup->tqg_lock);
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+
+ if (irq != -1)
+ intr_setaffinity(irq, &mask);
return (0);
}
@@ -742,9 +801,8 @@ static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
- cpuset_t mask;
struct grouptask *gtask;
- int i, k, old_cnt, qid, cpu;
+ int i, k, old_cnt, old_cpu, cpu;
mtx_assert(&qgroup->tqg_lock, MA_OWNED);
@@ -759,6 +817,9 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
}
qgroup->tqg_adjusting = 1;
old_cnt = qgroup->tqg_cnt;
+ old_cpu = 0;
+ if (old_cnt < cnt)
+ old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu;
mtx_unlock(&qgroup->tqg_lock);
/*
* Set up queue for tasks added before boot.
@@ -772,8 +833,13 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
/*
* If new taskq threads have been added.
*/
- for (i = old_cnt; i < cnt; i++)
- taskqgroup_cpu_create(qgroup, i);
+ cpu = old_cpu;
+ for (i = old_cnt; i < cnt; i++) {
+ for (k = 0; k < qgroup->tqg_stride; k++)
+ cpu = CPU_NEXT(cpu);
+
+ taskqgroup_cpu_create(qgroup, i, cpu);
+ }
mtx_lock(&qgroup->tqg_lock);
qgroup->tqg_cnt = cnt;
qgroup->tqg_stride = stride;
@@ -789,39 +855,15 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
}
}
+ mtx_unlock(&qgroup->tqg_lock);
+
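+ /* Re-attach, without the lock, every task collected from the old queues. */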
while ((gtask = LIST_FIRST(&gtask_head))) {
LIST_REMOVE(gtask, gt_list);
if (gtask->gt_cpu == -1)
- qid = taskqgroup_find(qgroup, gtask->gt_uniq);
- else {
- for (i = 0; i < qgroup->tqg_cnt; i++)
- if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
- qid = i;
- break;
- }
- }
- qgroup->tqg_queue[qid].tgc_cnt++;
- LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
- gt_list);
- gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
- }
- /*
- * Set new CPU and IRQ affinity
- */
- cpu = CPU_FIRST();
- for (i = 0; i < cnt; i++) {
- qgroup->tqg_queue[i].tgc_cpu = cpu;
- for (k = 0; k < qgroup->tqg_stride; k++)
- cpu = CPU_NEXT(cpu);
- CPU_ZERO(&mask);
- CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
- LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
- if (gtask->gt_irq == -1)
- continue;
- intr_setaffinity(gtask->gt_irq, &mask);
- }
+ taskqgroup_attach_deferred(qgroup, gtask);
+ else if (taskqgroup_attach_cpu_deferred(qgroup, gtask))
+ taskqgroup_attach_deferred(qgroup, gtask);
}
- mtx_unlock(&qgroup->tqg_lock);
/*
* If taskq thread count has been reduced.
@@ -838,12 +880,12 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
}
int
-taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
+taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
int error;
mtx_lock(&qgroup->tqg_lock);
- error = _taskqgroup_adjust(qgroup, cpu, stride);
+ error = _taskqgroup_adjust(qgroup, cnt, stride);
mtx_unlock(&qgroup->tqg_lock);
return (error);
}
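
Once SMP is up, growing the group is what triggers the deferred re-attach
paths added above. A minimal sketch of that wiring, continuing the
hypothetical names from the example before the diff: taskqgroup_adjust(),
SYSINIT(), SI_SUB_SMP, and mp_ncpus are real, but running the adjust from a
SYSINIT at SI_SUB_SMP is an assumption about where a subsystem would place it.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/smp.h>

static void
example_adjust(void *arg __unused)
{
	/*
	 * One queue per CPU at stride 1; tasks attached before
	 * smp_started are re-attached by the deferred paths.
	 */
	taskqgroup_adjust(qgroup_example, mp_ncpus, 1);
}
SYSINIT(example_tqg_adjust, SI_SUB_SMP, SI_ORDER_ANY, example_adjust, NULL);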