summaryrefslogtreecommitdiffstats
path: root/sys/kern/sched_4bsd.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--  sys/kern/sched_4bsd.c  96
1 files changed, 48 insertions, 48 deletions
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 9424f73..2fad27c 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -155,6 +155,8 @@ static struct runq runq;
*/
static struct runq runq_pcpu[MAXCPU];
long runq_length[MAXCPU];
+
+static cpuset_t idle_cpus_mask;
#endif
struct pcpuidlestat {
@@ -233,16 +235,6 @@ SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
&forward_wakeup_use_loop, 0,
"Use a loop to find idle cpus");
-static int forward_wakeup_use_single = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
- &forward_wakeup_use_single, 0,
- "Only signal one idle cpu");
-
-static int forward_wakeup_use_htt = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
- &forward_wakeup_use_htt, 0,
- "account for htt");
-
#endif
#if 0
static int sched_followon = 0;
@@ -959,7 +951,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (td->td_flags & TDF_IDLETD) {
TD_SET_CAN_RUN(td);
#ifdef SMP
- idle_cpus_mask &= ~PCPU_GET(cpumask);
+ /* Spinlock held here, assume no migration. */
+ CPU_NAND(&idle_cpus_mask, PCPU_PTR(cpumask));
#endif
} else {
if (TD_IS_RUNNING(td)) {
@@ -1033,7 +1026,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
#ifdef SMP
if (td->td_flags & TDF_IDLETD)
- idle_cpus_mask |= PCPU_GET(cpumask);
+ CPU_OR(&idle_cpus_mask, PCPU_PTR(cpumask));
#endif
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
@@ -1062,7 +1055,8 @@ static int
forward_wakeup(int cpunum)
{
struct pcpu *pc;
- cpumask_t dontuse, id, map, map2, map3, me;
+ cpuset_t dontuse, id, map, map2, me;
+ int iscpuset;
mtx_assert(&sched_lock, MA_OWNED);
@@ -1079,69 +1073,65 @@ forward_wakeup(int cpunum)
/*
* Check the idle mask we received against what we calculated
* before in the old version.
+ *
+ * Also note that sched_lock is held now, thus no migration is
+ * expected.
*/
me = PCPU_GET(cpumask);
/* Don't bother if we should be doing it ourself. */
- if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
+ if (CPU_OVERLAP(&me, &idle_cpus_mask) &&
+ (cpunum == NOCPU || CPU_ISSET(cpunum, &me)))
return (0);
- dontuse = me | stopped_cpus | hlt_cpus_mask;
- map3 = 0;
+ dontuse = me;
+ CPU_OR(&dontuse, &stopped_cpus);
+ CPU_OR(&dontuse, &hlt_cpus_mask);
+ CPU_ZERO(&map2);
if (forward_wakeup_use_loop) {
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
id = pc->pc_cpumask;
- if ((id & dontuse) == 0 &&
+ if (!CPU_OVERLAP(&id, &dontuse) &&
pc->pc_curthread == pc->pc_idlethread) {
- map3 |= id;
+ CPU_OR(&map2, &id);
}
}
}
if (forward_wakeup_use_mask) {
- map = 0;
- map = idle_cpus_mask & ~dontuse;
+ map = idle_cpus_mask;
+ CPU_NAND(&map, &dontuse);
/* If they are both on, compare and use loop if different. */
if (forward_wakeup_use_loop) {
- if (map != map3) {
- printf("map (%02X) != map3 (%02X)\n", map,
- map3);
- map = map3;
+ if (CPU_CMP(&map, &map2)) {
+ printf("map != map2, loop method preferred\n");
+ map = map2;
}
}
} else {
- map = map3;
+ map = map2;
}
/* If we only allow a specific CPU, then mask off all the others. */
if (cpunum != NOCPU) {
KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
- map &= (1 << cpunum);
- } else {
- /* Try choose an idle die. */
- if (forward_wakeup_use_htt) {
- map2 = (map & (map >> 1)) & 0x5555;
- if (map2) {
- map = map2;
- }
- }
-
- /* Set only one bit. */
- if (forward_wakeup_use_single) {
- map = map & ((~map) + 1);
- }
+ iscpuset = CPU_ISSET(cpunum, &map);
+ if (iscpuset == 0)
+ CPU_ZERO(&map);
+ else
+ CPU_SETOF(cpunum, &map);
}
- if (map) {
+ if (!CPU_EMPTY(&map)) {
forward_wakeups_delivered++;
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
id = pc->pc_cpumask;
- if ((map & id) == 0)
+ if (!CPU_OVERLAP(&map, &id))
continue;
if (cpu_idle_wakeup(pc->pc_cpuid))
- map &= ~id;
+ CPU_NAND(&map, &id);
}
- if (map)
+ if (!CPU_EMPTY(&map))
ipi_selected(map, IPI_AST);
return (1);
}
@@ -1157,7 +1147,7 @@ kick_other_cpu(int pri, int cpuid)
int cpri;
pcpu = pcpu_find(cpuid);
- if (idle_cpus_mask & pcpu->pc_cpumask) {
+ if (CPU_OVERLAP(&idle_cpus_mask, &pcpu->pc_cpumask)) {
forward_wakeups_delivered++;
if (!cpu_idle_wakeup(cpuid))
ipi_cpu(cpuid, IPI_AST);
@@ -1215,6 +1205,7 @@ void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
+ cpuset_t idle, me, tidlemsk;
struct td_sched *ts;
int forwarded = 0;
int cpu;
@@ -1284,11 +1275,20 @@ sched_add(struct thread *td, int flags)
kick_other_cpu(td->td_priority, cpu);
} else {
if (!single_cpu) {
- cpumask_t me = PCPU_GET(cpumask);
- cpumask_t idle = idle_cpus_mask & me;
- if (!idle && ((flags & SRQ_INTR) == 0) &&
- (idle_cpus_mask & ~(hlt_cpus_mask | me)))
+ /*
+ * Thread spinlock is held here, assume no
+ * migration is possible.
+ */
+ me = PCPU_GET(cpumask);
+ idle = idle_cpus_mask;
+ tidlemsk = idle;
+ CPU_AND(&idle, &me);
+ CPU_OR(&me, &hlt_cpus_mask);
+ CPU_NAND(&tidlemsk, &me);
+
+ if (CPU_EMPTY(&idle) && ((flags & SRQ_INTR) == 0) &&
+ !CPU_EMPTY(&tidlemsk))
forwarded = forward_wakeup(cpu);
}
OpenPOWER on IntegriCloud