author		jeff <jeff@FreeBSD.org>	2007-10-08 23:50:39 +0000
committer	jeff <jeff@FreeBSD.org>	2007-10-08 23:50:39 +0000
commit		2bbd1c2e708382ee1b19c674a8652704a90aa976 (patch)
tree		437f98987c1080f4f0762b3cbbdae5ae828de9c0 /sys/kern/sched_ule.c
parent		57102cf5add6c7f7ca4dea8071ccd6e46560954e (diff)
- Bail out of tdq_idled if !smp_started or idle stealing is disabled.  This
  fixes a bug on UP machines with SMP kernels where the idle thread
  constantly switches after trying to steal work from the local cpu.
- Make the idle stealing code more robust against self selection.
- Prefer to steal from the cpu with the highest load that has at least one
  transferable thread.  Before we selected the cpu with the highest
  transferable count which excludes bound threads.

Collaborated with:	csjp
Approved by:	re
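For illustration, here is a self-contained sketch of the victim-selection
heuristic the commit message describes: scan every cpu, skip run queues with
no transferable threads, and remember the highest-loaded remaining queue.
Every name in this sketch (runq_stats, pick_steal_cpu) is invented for the
example; these are not the kernel's tdq structures or accessors.

/*
 * Sketch of the new victim-selection heuristic, assuming a simple
 * per-cpu stats array.  None of these names come from sched_ule.c.
 */
#include <stddef.h>

struct runq_stats {
	int	absent;		/* nonzero if this cpu slot is unpopulated */
	int	load;		/* total runnable threads on the cpu */
	int	transferable;	/* runnable threads not bound to the cpu */
};

/* Return the cpu to steal from, or -1 if no queue qualifies. */
static int
pick_steal_cpu(const struct runq_stats *stats, int ncpu, int self,
    int steal_thresh)
{
	int highload = 0;
	int highcpu = -1;
	int cpu;

	for (cpu = 0; cpu < ncpu; cpu++) {
		if (stats[cpu].absent)
			continue;
		/* Ignore queues whose threads are all bound to their cpu. */
		if (stats[cpu].transferable == 0)
			continue;
		if (stats[cpu].load < highload)
			continue;
		highload = stats[cpu].load;
		highcpu = cpu;
	}
	/* Mirror the patch's guards: load threshold and self selection. */
	if (highcpu == self || highload < steal_thresh)
		return (-1);
	return (highcpu);
}

The change this models: the victim is ranked by total load (tdq_load) rather
than by transferable count, so a queue full of bound threads can no longer
win the ranking while offering nothing to steal.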
Diffstat (limited to 'sys/kern/sched_ule.c')
 sys/kern/sched_ule.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 77cf83f..99f84f0 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -740,9 +740,10 @@ tdq_idled(struct tdq *tdq)
 	struct tdq *steal;
 	int highload;
 	int highcpu;
-	int load;
 	int cpu;
 
+	if (smp_started == 0 || steal_idle == 0)
+		return (1);
 	/* We don't want to be preempted while we're iterating over tdqs */
 	spinlock_enter();
 	tdg = tdq->tdq_group;
@@ -762,29 +763,34 @@ tdq_idled(struct tdq *tdq)
 		}
 		TDQ_UNLOCK(tdq);
 	}
+	/*
+	 * Find the least loaded CPU with a transferable thread and attempt
+	 * to steal it.  We make a lockless pass and then verify that the
+	 * thread is still available after locking.
+	 */
 	for (;;) {
-		if (steal_idle == 0)
-			break;
 		highcpu = 0;
 		highload = 0;
 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
 			if (CPU_ABSENT(cpu))
 				continue;
 			steal = TDQ_CPU(cpu);
-			load = TDQ_CPU(cpu)->tdq_transferable;
-			if (load < highload)
+			if (steal->tdq_transferable == 0)
+				continue;
+			if (steal->tdq_load < highload)
 				continue;
-			highload = load;
+			highload = steal->tdq_load;
 			highcpu = cpu;
 		}
 		if (highload < steal_thresh)
 			break;
 		steal = TDQ_CPU(highcpu);
+		if (steal == tdq)
+			break;
 		tdq_lock_pair(tdq, steal);
-		if (steal->tdq_transferable >= steal_thresh)
+		if (steal->tdq_load >= steal_thresh && steal->tdq_transferable)
 			goto steal;
 		tdq_unlock_pair(tdq, steal);
-		break;
 	}
 	spinlock_exit();
 	return (1);
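The comment added in the second hunk describes a lockless pass followed by
re-validation under the lock.  Below is a hedged user-space sketch of that
pattern, with pthread mutexes standing in for the kernel's per-queue spin
locks and tdq_lock_pair(); every name here is invented for the example.

#include <pthread.h>

struct queue {
	pthread_mutex_t	lock;
	int		load;
	int		transferable;
};

/*
 * Lock two distinct queues in a stable (address) order so two cpus locking
 * the same pair cannot deadlock; tdq_lock_pair() plays the analogous role
 * for tdq locks.  Callers must ensure a != b, which is what the patch's
 * "if (steal == tdq) break;" self-selection guard provides.
 */
static void
lock_pair(struct queue *a, struct queue *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

/* Try to move one unit of load from victim to self; returns 1 on success. */
static int
try_steal(struct queue *self, struct queue *victim, int steal_thresh)
{
	int stolen = 0;

	/* Lockless pass: cheap, but the answer may already be stale. */
	if (victim->load < steal_thresh || victim->transferable == 0)
		return (0);
	lock_pair(self, victim);
	/* Verify under the locks before committing, as the patch does. */
	if (victim->load >= steal_thresh && victim->transferable != 0) {
		victim->load--;
		victim->transferable--;
		self->load++;
		stolen = 1;
	}
	pthread_mutex_unlock(&self->lock);
	pthread_mutex_unlock(&victim->lock);
	return (stolen);
}

Between the unlocked scan and the lock acquisition another cpu may have
drained the victim queue, which is why the patch re-checks both tdq_load and
tdq_transferable before jumping to the steal path.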