summaryrefslogtreecommitdiffstats
path: root/sys/kern/sched_ule.c
diff options
context:
space:
mode:
authormdf <mdf@FreeBSD.org>2010-09-01 20:32:47 +0000
committermdf <mdf@FreeBSD.org>2010-09-01 20:32:47 +0000
commitbbc395771573eb8ceaf7c40f841d5a7720bf2032 (patch)
treeee8b6f89258b1cf24a265a85475976835d4b3900 /sys/kern/sched_ule.c
parent4525889ed390c72d717942ae92b2e3ce5add947e (diff)
downloadFreeBSD-src-bbc395771573eb8ceaf7c40f841d5a7720bf2032.zip
FreeBSD-src-bbc395771573eb8ceaf7c40f841d5a7720bf2032.tar.gz
Fix a bug with sched_affinity() where it checks td_pinned of another
thread in a racy manner, which can lead to attempting to migrate a thread that is pinned to a CPU. Instead, have sched_switch() determine which CPU a thread should run on if the current one is not allowed. KASSERT in sched_bind() that the thread is not yet pinned to a CPU. KASSERT in sched_switch() that only migratable threads or those moving due to a sched_bind() are changing CPUs. sched_affinity code came from jhb@. MFC after: 2 weeks
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--sys/kern/sched_ule.c24
1 file changed, 13 insertions, 11 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 0a1f7b9..8e4a7ba 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1797,10 +1797,16 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
srqflag = (flags & SW_PREEMPT) ?
SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
SRQ_OURSELF|SRQ_YIELDING;
+ if (THREAD_CAN_MIGRATE(td) && !THREAD_CAN_SCHED(td, ts->ts_cpu))
+ ts->ts_cpu = sched_pickcpu(td, 0);
if (ts->ts_cpu == cpuid)
tdq_runq_add(tdq, td, srqflag);
- else
+ else {
+ KASSERT(THREAD_CAN_MIGRATE(td) ||
+ (ts->ts_flags & TSF_BOUND) != 0,
+ ("Thread %p shouldn't migrate", td));
mtx = sched_switch_migrate(tdq, td, srqflag);
+ }
} else {
/* This thread must be going to sleep. */
TDQ_LOCK(tdq);
@@ -2383,7 +2389,6 @@ sched_affinity(struct thread *td)
{
#ifdef SMP
struct td_sched *ts;
- int cpu;
THREAD_LOCK_ASSERT(td, MA_OWNED);
ts = td->td_sched;
@@ -2397,17 +2402,13 @@ sched_affinity(struct thread *td)
if (!TD_IS_RUNNING(td))
return;
td->td_flags |= TDF_NEEDRESCHED;
- if (!THREAD_CAN_MIGRATE(td))
- return;
/*
- * Assign the new cpu and force a switch before returning to
- * userspace. If the target thread is not running locally send
- * an ipi to force the issue.
+ * Force a switch before returning to userspace. If the
+ * target thread is not running locally send an ipi to force
+ * the issue.
*/
- cpu = ts->ts_cpu;
- ts->ts_cpu = sched_pickcpu(td, 0);
- if (cpu != PCPU_GET(cpuid))
- ipi_cpu(cpu, IPI_PREEMPT);
+ if (td != curthread)
+ ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
#endif
}
@@ -2424,6 +2425,7 @@ sched_bind(struct thread *td, int cpu)
ts = td->td_sched;
if (ts->ts_flags & TSF_BOUND)
sched_unbind(td);
+ KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
ts->ts_flags |= TSF_BOUND;
sched_pin();
if (PCPU_GET(cpuid) == cpu)
OpenPOWER on IntegriCloud