author     mav <mav@FreeBSD.org>  2010-09-10 13:24:47 +0000
committer  mav <mav@FreeBSD.org>  2010-09-10 13:24:47 +0000
commit     aa2a7434536fa9cdc5e48a926e00e314f87b1396
tree       1d4e9902d2fa2ec03cbf1555675fb4a34b90a7ad  /sys/kern/sched_ule.c
parent     e3c11acb8b25b5901c31cc9dbad72b1990dde985
Do not IPI a CPU that is already spinning for load. It doubles the effect
of spinning (compared to MWAIT) on some heavily switching test loads.
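
The idea: an idle CPU that is still busy-polling its run queue will notice new
load on its own, so tdq_notify() can skip the IPI unless the target has
actually committed to cpu_idle(). The handshake is easier to see outside the
kernel; below is a minimal userland sketch, assuming C11 atomics and POSIX
threads. Every name in it (cpu_sim, notify, idle_thread, kicks) is
hypothetical, a pthread condition variable stands in for cpu_idle() and the
IPI, and only the load/cpu_idle fields mirror the real tdq_load/tdq_cpu_idle.

/*
 * Minimal userland sketch of the handshake above (build with:
 * cc -pthread sketch.c).  A model of the idea, not FreeBSD code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct cpu_sim {
	atomic_int	load;		/* mirrors tdq_load */
	atomic_int	cpu_idle;	/* mirrors tdq_cpu_idle */
	atomic_int	kicks;		/* "IPIs" actually delivered */
	pthread_mutex_t	mtx;
	pthread_cond_t	cv;
};

/* Remote enqueue: kick only a CPU that has committed to sleeping. */
static void
notify(struct cpu_sim *c)
{
	atomic_fetch_add(&c->load, 1);
	if (atomic_load(&c->cpu_idle)) {	/* a spinner sees the load itself */
		atomic_fetch_add(&c->kicks, 1);
		pthread_mutex_lock(&c->mtx);
		pthread_cond_signal(&c->cv);	/* stands in for the IPI */
		pthread_mutex_unlock(&c->mtx);
	}
}

static void *
idle_thread(void *arg)
{
	struct cpu_sim *c = arg;
	long spins;

	/* Spin phase: poll cheaply, flag clear, no kick wanted. */
	for (spins = 0; spins < 100000000L; spins++)
		if (atomic_load(&c->load) != 0)
			return (NULL);
	/* Sleep phase: publish the flag, then re-check before waiting. */
	atomic_store(&c->cpu_idle, 1);
	pthread_mutex_lock(&c->mtx);
	while (atomic_load(&c->load) == 0)	/* closes the race window */
		pthread_cond_wait(&c->cv, &c->mtx);
	pthread_mutex_unlock(&c->mtx);
	atomic_store(&c->cpu_idle, 0);
	return (NULL);
}

int
main(void)
{
	struct cpu_sim c = { .mtx = PTHREAD_MUTEX_INITIALIZER,
	    .cv = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, idle_thread, &c);
	usleep(1000);		/* usually lands in the spin phase */
	notify(&c);
	pthread_join(t, NULL);
	printf("kicks sent: %d\n", atomic_load(&c.kicks));
	return (0);
}

Run as written it usually prints "kicks sent: 0": the notify arrives while the
fake CPU is still spinning, which is exactly the case the commit stops IPIing.
Stretch the usleep() past the spin budget and the flag is already set, so the
kick (the IPI in the real code) becomes necessary and the count goes to 1.
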
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--  sys/kern/sched_ule.c  15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 0162b64..bb2d34a 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -196,7 +196,7 @@ static int preempt_thresh = 0;
 #endif
 static int static_boost = PRI_MIN_TIMESHARE;
 static int sched_idlespins = 10000;
-static int sched_idlespinthresh = 4;
+static int sched_idlespinthresh = 64;
 
 /*
  * tdq - per processor runqs and statistics. All fields are protected by the
@@ -208,6 +208,7 @@ struct tdq {
 	struct mtx	tdq_lock;		/* run queue lock. */
 	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
 	volatile int	tdq_load;		/* Aggregate load. */
+	volatile int	tdq_cpu_idle;		/* cpu_idle() is active. */
 	int		tdq_sysload;		/* For loadavg, !ITHD load. */
 	int		tdq_transferable;	/* Transferable thread count. */
 	short		tdq_switchcnt;		/* Switches this tick. */
@@ -966,7 +967,7 @@ tdq_notify(struct tdq *tdq, struct thread *td)
 		 * If the MD code has an idle wakeup routine try that before
 		 * falling back to IPI.
 		 */
-		if (cpu_idle_wakeup(cpu))
+		if (!tdq->tdq_cpu_idle || cpu_idle_wakeup(cpu))
 			return;
 	}
 	tdq->tdq_ipipending = 1;
@@ -2545,8 +2546,14 @@ sched_idletd(void *dummy)
 			}
 		}
 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
-		if (tdq->tdq_load == 0)
-			cpu_idle(switchcnt > 1);
+		if (tdq->tdq_load == 0) {
+			tdq->tdq_cpu_idle = 1;
+			if (tdq->tdq_load == 0) {
+				cpu_idle(switchcnt > sched_idlespinthresh);
+				tdq->tdq_switchcnt++;
+			}
+			tdq->tdq_cpu_idle = 0;
+		}
 		if (tdq->tdq_load) {
 			thread_lock(td);
 			mi_switch(SW_VOL | SWT_IDLE, NULL);
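
A note on the shape of the new sched_idletd() code, offered as a reading of
the diff rather than anything the commit message states: the inner re-check of
tdq_load, made only after tdq_cpu_idle has been published, is what keeps the
skipped IPIs from becoming lost wakeups. An illustrative interleaving (not
FreeBSD code):

/*
 * Without the re-check of tdq_load after publishing tdq_cpu_idle,
 * this ordering would park the CPU with a runnable thread queued:
 *
 *	idle CPU			remote CPU
 *	--------			----------
 *	sees tdq_load == 0
 *					enqueues thread, tdq_load = 1
 *					reads tdq_cpu_idle == 0 -> no IPI
 *	tdq_cpu_idle = 1
 *	cpu_idle()			(nobody will ever kick us)
 *
 * Re-checking tdq_load after the flag store closes the window, on the
 * assumption that the store is visible to the remote CPU before the
 * reload; volatile orders the compiler, hardware ordering is per-arch.
 */

The same hunk also retires the old switchcnt > 1 test: cpu_idle() is now asked
to busy-wait only when the recent switch rate exceeds sched_idlespinthresh,
whose default this commit raises from 4 to 64, and the added tdq_switchcnt++
presumably lets a wakeup out of cpu_idle() count toward the next tick's switch
rate.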