path: root/sys/kern/sched_4bsd.c
author     mav <mav@FreeBSD.org>  2010-09-11 07:08:22 +0000
committer  mav <mav@FreeBSD.org>  2010-09-11 07:08:22 +0000
commit     90db9577861bc12784711572d1f686fefaa56046 (patch)
tree       91162b6a541218c29cacf3f3b5ce1d64e7a0abae /sys/kern/sched_4bsd.c
parent     f9956f69fbc5c984c7e716e4dbac5198659aa5e0 (diff)
download   FreeBSD-src-90db9577861bc12784711572d1f686fefaa56046.zip
           FreeBSD-src-90db9577861bc12784711572d1f686fefaa56046.tar.gz
Merge some SCHED_ULE features to SCHED_4BSD:
- Teach SCHED_4BSD to inform cpu_idle() about a high sleep/wakeup rate so
  that it can choose an optimized handler (MONITOR/MWAIT on x86). The hint
  will also be needed to bypass the forthcoming idle tick skipping logic,
  to avoid spending resources on event rescheduling when it would give no
  benefit.
- Teach SCHED_4BSD to wake up idle CPUs without using an IPI. On x86, when
  MONITOR/MWAIT is active, this requires just a single memory write (see
  the sketch below). This doubles performance on some heavily switching
  test loads.
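A minimal conceptual sketch of the mechanism, assuming an x86 CPU with
MONITOR/MWAIT available. This is an illustration rather than the actual
FreeBSD code: all names in it (idle_state, mwait_idle, idle, idle_tick,
idle_wakeup, NCPU_MAX) are hypothetical. An idle CPU arms a hardware
monitor on its per-CPU state word and parks in MWAIT; any other CPU can
then wake it with a single store to that word, and an IPI is needed only
when the target is not parked. The sketch also mirrors the idle-call
counting that drives the cpu_idle() busy hint in the diff below.

/*
 * Hypothetical sketch of MWAIT-based idling and single-write wakeup.
 * Not the actual FreeBSD implementation; kernel (ring 0) context and
 * MONITOR/MWAIT support are assumed.
 */
#include <stdint.h>

#define	NCPU_MAX	64

#define	STATE_SLEEPING	0		/* CPU parked in MWAIT */
#define	STATE_RUNNING	1

/* Per-CPU word watched by MONITOR; a store to it ends MWAIT. */
static volatile uint32_t idle_state[NCPU_MAX];

/* Idle entries this tick and last tick, as in struct pcpuidlestat. */
static unsigned int idlecalls[NCPU_MAX], oldidlecalls[NCPU_MAX];

static void
mwait_idle(int cpu)
{

	idle_state[cpu] = STATE_SLEEPING;
	/* Arm address monitoring on the state word (EAX = address). */
	__asm __volatile("monitor"
	    : : "a" (&idle_state[cpu]), "c" (0), "d" (0));
	/* Re-check so a store made before MONITOR armed is not lost. */
	if (idle_state[cpu] == STATE_SLEEPING)
		__asm __volatile("mwait" : : "a" (0), "c" (0));
}

/*
 * Idle handler: a high recent idle-entry rate selects the cheap-wakeup
 * MWAIT path, mirroring cpu_idle(stat->idlecalls + stat->oldidlecalls
 * > 64) in sched_idletd() below; otherwise HLT, which only an
 * interrupt can end.
 */
static void
idle(int cpu)
{
	int busy;

	busy = idlecalls[cpu] + oldidlecalls[cpu] > 64;
	idlecalls[cpu]++;
	if (busy)
		mwait_idle(cpu);
	else
		__asm __volatile("sti; hlt");
}

/* Per-tick counter rotation, as sched_clock() does in the diff. */
static void
idle_tick(int cpu)
{

	oldidlecalls[cpu] = idlecalls[cpu];
	idlecalls[cpu] = 0;
}

/*
 * Wake an idle CPU with one store.  Returns 1 on success and 0 when
 * the target is not parked in MWAIT, in which case the caller falls
 * back to an IPI.  This is the pattern cpu_idle_wakeup() enables in
 * forward_wakeup() and kick_other_cpu() below.
 */
static int
idle_wakeup(int cpu)
{

	if (idle_state[cpu] != STATE_SLEEPING)
		return (0);
	idle_state[cpu] = STATE_RUNNING;
	return (1);
}

The IPI fallback matters because a CPU may be idling in HLT or not
idling at all, where a plain store would go unnoticed; that is why the
diff keeps ipi_selected()/ipi_cpu() for whatever CPUs cpu_idle_wakeup()
could not wake.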
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--  sys/kern/sched_4bsd.c | 32 ++++++++++++++++++++++++++++----
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index e579198..780dc6d 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -157,6 +157,12 @@ static struct runq runq_pcpu[MAXCPU];
 long runq_length[MAXCPU];
 #endif
 
+struct pcpuidlestat {
+	u_int idlecalls;
+	u_int oldidlecalls;
+};
+static DPCPU_DEFINE(struct pcpuidlestat, idlestat);
+
 static void
 setup_runqs(void)
 {
@@ -684,6 +690,7 @@ sched_rr_interval(void)
 void
 sched_clock(struct thread *td)
 {
+	struct pcpuidlestat *stat;
 	struct td_sched *ts;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -703,6 +710,10 @@ sched_clock(struct thread *td)
 	if (!TD_IS_IDLETHREAD(td) &&
 	    ticks - PCPU_GET(switchticks) >= sched_quantum)
 		td->td_flags |= TDF_NEEDRESCHED;
+
+	stat = DPCPU_PTR(idlestat);
+	stat->oldidlecalls = stat->idlecalls;
+	stat->idlecalls = 0;
 }
 
 /*
@@ -1137,7 +1148,15 @@ forward_wakeup(int cpunum)
 	}
 	if (map) {
 		forward_wakeups_delivered++;
-		ipi_selected(map, IPI_AST);
+		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+			id = pc->pc_cpumask;
+			if ((map & id) == 0)
+				continue;
+			if (cpu_idle_wakeup(pc->pc_cpuid))
+				map &= ~id;
+		}
+		if (map)
+			ipi_selected(map, IPI_AST);
 		return (1);
 	}
 	if (cpunum == NOCPU)
@@ -1154,7 +1173,8 @@ kick_other_cpu(int pri, int cpuid)
 	pcpu = pcpu_find(cpuid);
 	if (idle_cpus_mask & pcpu->pc_cpumask) {
 		forward_wakeups_delivered++;
-		ipi_cpu(cpuid, IPI_AST);
+		if (!cpu_idle_wakeup(cpuid))
+			ipi_cpu(cpuid, IPI_AST);
 		return;
 	}
 
@@ -1537,12 +1557,16 @@ sched_tick(void)
 void
 sched_idletd(void *dummy)
 {
+	struct pcpuidlestat *stat;
 
+	stat = DPCPU_PTR(idlestat);
 	for (;;) {
 		mtx_assert(&Giant, MA_NOTOWNED);
 
-		while (sched_runnable() == 0)
-			cpu_idle(0);
+		while (sched_runnable() == 0) {
+			cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
+			stat->idlecalls++;
+		}
 
 		mtx_lock_spin(&sched_lock);
 		mi_switch(SW_VOL | SWT_IDLE, NULL);