author     mav <mav@FreeBSD.org>    2018-02-02 18:03:14 +0000
committer  mav <mav@FreeBSD.org>    2018-02-02 18:03:14 +0000
commit     2d870dc158a4bb94ef259f892fc23e1f9dfec8d7 (patch)
tree       985ebf2a39a12a2f39f85f504cffbcbb2c45862d
parent     e6ad539d7575f723818b94d59c72377497425a11 (diff)
download   FreeBSD-src-2d870dc158a4bb94ef259f892fc23e1f9dfec8d7.zip
           FreeBSD-src-2d870dc158a4bb94ef259f892fc23e1f9dfec8d7.tar.gz
MFC r307567 (by sbruno): Assert that we're assigning a non-null taskqueue.
ref: https://github.com/NextBSD/NextBSD/commit/535865d02c162e415d7436899cd6db5000a0cc7b

Fix cpu assignment by assuring stride is non-zero, assert that all tasks
have a valid taskqueue.
ref: https://github.com/NextBSD/NextBSD/commit/db398176234fe3ce9f8e8b671f56000f8276feba

Start cpu assignment from zero.
ref: https://github.com/NextBSD/NextBSD/commit/d99d39b6b6c5dfac1eb440c41e36ebf4c897198e
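The behavioral part of the change is the stride loop in _taskqgroup_adjust(): the queue for slot i is now created on the current CPU before the CPU pointer is advanced, and the advance uses the stride argument rather than the stored (possibly still zero) tqg_stride field, so assignment effectively starts from CPU 0. A minimal userspace sketch of that ordering, using made-up values for the CPU count, cnt and stride and a modulo walk in place of CPU_NEXT(), might look like:

/*
 * Minimal userspace sketch (not the kernel code) of the corrected
 * ordering: bind queue i to the current CPU first, then advance by the
 * caller-supplied stride.  ncpus, cnt and stride are illustrative
 * values only.
 */
#include <stdio.h>

int
main(void)
{
	int ncpus = 8, cnt = 4, stride = 2;
	int cpu = 0, i, k;

	for (i = 0; i < cnt; i++) {
		/* Queue 0 lands on the starting CPU before any stride. */
		printf("queue %d -> cpu %d\n", i, cpu);

		/* Advance by the stride argument, not a stored field. */
		for (k = 0; k < stride; k++)
			cpu = (cpu + 1) % ncpus;
	}
	return (0);
}

With these values it prints queue 0 -> cpu 0, queue 1 -> cpu 2, and so on; with the old ordering the first queue would already have been pushed past the starting CPU, or not moved at all when the stored stride was still zero.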
-rw-r--r--   sys/kern/subr_gtaskqueue.c   17
1 file changed, 14 insertions, 3 deletions
diff --git a/sys/kern/subr_gtaskqueue.c b/sys/kern/subr_gtaskqueue.c
index 115d834..b54e4b8 100644
--- a/sys/kern/subr_gtaskqueue.c
+++ b/sys/kern/subr_gtaskqueue.c
@@ -666,6 +666,7 @@ taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
gt_list);
+ MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
mtx_unlock(&qgroup->tqg_lock);
}
@@ -730,6 +731,7 @@ taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
}
qgroup->tqg_queue[qid].tgc_cnt++;
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+ MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
mtx_unlock(&qgroup->tqg_lock);
@@ -835,10 +837,10 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
*/
cpu = old_cpu;
for (i = old_cnt; i < cnt; i++) {
- for (k = 0; k < qgroup->tqg_stride; k++)
- cpu = CPU_NEXT(cpu);
-
taskqgroup_cpu_create(qgroup, i, cpu);
+
+ for (k = 0; k < stride; k++)
+ cpu = CPU_NEXT(cpu);
}
mtx_lock(&qgroup->tqg_lock);
qgroup->tqg_cnt = cnt;
@@ -865,6 +867,15 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
taskqgroup_attach_deferred(qgroup, gtask);
}
+#ifdef INVARIANTS
+ mtx_lock(&qgroup->tqg_lock);
+ for (i = 0; i < qgroup->tqg_cnt; i++) {
+ MPASS(qgroup->tqg_queue[i].tgc_taskq != NULL);
+ LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list)
+ MPASS(gtask->gt_taskqueue != NULL);
+ }
+ mtx_unlock(&qgroup->tqg_lock);
+#endif
/*
* If taskq thread count has been reduced.
*/
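The #ifdef INVARIANTS hunk adds a debug-only sweep: on kernels built with the INVARIANTS option, MPASS() fires if any per-CPU queue, or any task attached to one, is left without a taskqueue pointer after the adjustment. A rough userspace analogue of that sweep, with simplified stand-in types and assert() playing the role of MPASS(), might look like:

/*
 * Userspace analogue of the INVARIANTS-only check added at the end of
 * _taskqgroup_adjust().  struct queue and struct gtask below are
 * simplified stand-ins, not the kernel structures.
 */
#include <assert.h>
#include <stddef.h>

struct gtask {
	void		*gt_taskqueue;	/* set when the task is attached */
	struct gtask	*gt_next;
};

struct queue {
	void		*tgc_taskq;	/* backing taskqueue */
	struct gtask	*tgc_tasks;	/* attached tasks */
};

static void
verify_queues(const struct queue *q, int cnt)
{
	const struct gtask *t;
	int i;

	for (i = 0; i < cnt; i++) {
		/* Every per-CPU queue needs a backing taskqueue... */
		assert(q[i].tgc_taskq != NULL);
		/* ...and every attached task must point at one too. */
		for (t = q[i].tgc_tasks; t != NULL; t = t->gt_next)
			assert(t->gt_taskqueue != NULL);
	}
}

int
main(void)
{
	static int dummy_tq;			/* stand-in taskqueue */
	struct gtask t = { &dummy_tq, NULL };
	struct queue q[1] = { { &dummy_tq, &t } };

	verify_queues(q, 1);	/* passes; a NULL pointer would abort */
	return (0);
}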