author		jeff <jeff@FreeBSD.org>	2007-07-19 19:51:45 +0000
committer	jeff <jeff@FreeBSD.org>	2007-07-19 19:51:45 +0000
commit		550dacee12e48c0ff97cc2301a7b8756fc69bf57 (patch)
tree		f62e634ae6610fd7edb8e47ec703c5c4d3c8b142	/sys/kern/sched_ule.c
parent		a2df1122c7afebd60bbfc854038db6523bfb0254 (diff)
- When newtd is specified to sched_switch() it was not being initialized
  properly.  We have to temporarily unlock the TDQ lock so we can lock
  the thread and add it to the run queue.  This is used only for KSE.
- When we add a thread from tdq_move() via sched_balance() we need to
  IPI the target if it's sitting in the idle thread or it'll never run.

Reported by:	Rene Landan
Approved by:	re
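The first fix is a lock-ordering problem: sched_switch() is entered with the TDQ lock held, but the incoming thread has to be locked before it can be added to the run queue, and that cannot be done on top of the queue lock. Below is a minimal userland sketch of the "temporarily unlock the queue so we can lock the thread" pattern using POSIX mutexes; struct item, struct queue and queue_switchin() are hypothetical and only illustrate the unlock/relock ordering, not the kernel's spinlock_enter()/spinlock_exit() preemption guard or the td_lock handoff done by sched_setcpu().

/*
 * Minimal userland sketch, not kernel code.  The agreed lock order is
 * item lock before queue lock, so the queue lock must be dropped
 * before the item lock is taken and then re-acquired.
 */
#include <pthread.h>

struct item {
	pthread_mutex_t	lock;		/* per-item lock, like td_lock */
	struct item	*next;
};

struct queue {
	pthread_mutex_t	lock;		/* queue lock, like the TDQ lock */
	struct item	*head;
};

/* Called with q->lock held; returns with q->lock held and 'it' queued. */
static void
queue_switchin(struct queue *q, struct item *it)
{
	pthread_mutex_unlock(&q->lock);	/* drop the queue lock ... */
	pthread_mutex_lock(&it->lock);	/* ... so the item can be locked */
	pthread_mutex_lock(&q->lock);	/* re-take the queue lock */
	it->next = q->head;		/* add to the "run queue" */
	q->head = it;
	pthread_mutex_unlock(&it->lock);
}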
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--	sys/kern/sched_ule.c | 32 +++++++++++++++++++++++++-------
1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a90e657..2c732da 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -683,6 +683,7 @@ tdq_move(struct tdq *from, struct tdq *to)
 	ts->ts_cpu = cpu;
 	td->td_lock = TDQ_LOCKPTR(to);
 	tdq_add(to, td, SRQ_YIELDING);
+	tdq_notify(ts);
 }
 
 /*
@@ -1657,6 +1658,26 @@ sched_unlend_user_prio(struct thread *td, u_char prio)
 }
 
 /*
+ * Add the thread passed as 'newtd' to the run queue before selecting
+ * the next thread to run.  This is only used for KSE.
+ */
+static void
+sched_switchin(struct tdq *tdq, struct thread *td)
+{
+#ifdef SMP
+	spinlock_enter();
+	TDQ_UNLOCK(tdq);
+	thread_lock(td);
+	spinlock_exit();
+	sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
+#else
+	td->td_lock = TDQ_LOCKPTR(tdq);
+#endif
+	tdq_add(tdq, td, SRQ_YIELDING);
+	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
+}
+
+/*
  * Block a thread for switching.  Similar to thread_block() but does not
  * bump the spin count.
  */
@@ -1750,14 +1771,11 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	 */
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
 	/*
-	 * If KSE assigned a new thread just add it here and pick the best one.
+	 * If KSE assigned a new thread just add it here and let choosethread
+	 * select the best one.
 	 */
-	if (newtd != NULL) {
-		/* XXX This is bogus.  What if the thread is locked elsewhere? */
-		td->td_lock = TDQ_LOCKPTR(tdq);
-		td->td_sched->ts_cpu = cpuid;
-		tdq_add(tdq, td, SRQ_YIELDING);
-	}
+	if (newtd != NULL)
+		sched_switchin(tdq, newtd);
 	newtd = choosethread();
 	/*
 	 * Call the MD code to switch contexts if necessary.
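
The tdq_notify() call added to tdq_move() guards against a lost wakeup: once a thread has been placed on another CPU's queue, nothing forces that CPU out of its idle loop unless it is interrupted. A rough userland analogue, with hypothetical workq_add()/workq_idle() helpers, is a producer that enqueues work for a consumer sleeping on a condition variable; the signal below plays the role of the IPI.

/*
 * Userland sketch of the tdq_move()/tdq_notify() fix, not kernel code.
 * If workq_add() skipped the signal, a consumer parked in workq_idle()
 * would never notice the new work, the scheduler equivalent of queueing
 * a thread on a CPU that is sitting in the idle thread without IPIing it.
 */
#include <pthread.h>

struct workq {
	pthread_mutex_t	lock;
	pthread_cond_t	wakeup;		/* plays the role of the IPI */
	int		pending;
};

static void
workq_add(struct workq *wq)
{
	pthread_mutex_lock(&wq->lock);
	wq->pending++;
	pthread_cond_signal(&wq->wakeup);	/* wake the idle consumer */
	pthread_mutex_unlock(&wq->lock);
}

static void
workq_idle(struct workq *wq)
{
	pthread_mutex_lock(&wq->lock);
	while (wq->pending == 0)
		pthread_cond_wait(&wq->wakeup, &wq->lock);
	wq->pending--;
	pthread_mutex_unlock(&wq->lock);
}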