author		attilio <attilio@FreeBSD.org>	2011-07-04 12:04:52 +0000
committer	attilio <attilio@FreeBSD.org>	2011-07-04 12:04:52 +0000
commit		364d0522f778b206262efce0932d6cea821879c6 (patch)
tree		f85bc4cd83b575a0c42f08d746e644f6dfc0e2ef /sys/kern
parent		95ca970257de274a90a1a867048c5ace5acf532d (diff)
With the retirement of cpumask_t and the adoption of cpuset_t for
representing a mask of CPUs, pc_other_cpus and pc_cpumask have become
highly inefficient.

Remove them and replace their usage with simple pc_cpuid logic (as, at
the moment, pc_cpumask can easily be represented by (1 << pc_cpuid) and
pc_other_cpus by (all_cpus & ~(1 << pc_cpuid))).

This change is not targeted for MFC because of the struct pcpu members
removal and the dependency on the cpumask_t retirement.

MD review by:	marcel, marius, alc
Tested by:	pluknet
MD testing by:	marcel, marius, gonzo, andreast
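
For readers following the mechanical substitution, here is a minimal
sketch (not part of the commit; the helper names cpumask_of() and
other_cpus_of() are illustrative only) of the two equivalences the
message describes, written with the cpuset macros from <sys/cpuset.h>:

#include <sys/param.h>
#include <sys/cpuset.h>

/* Old pc_cpumask: a set holding only this CPU's bit. */
static void
cpumask_of(u_int cpuid, cpuset_t *mask)
{
	CPU_SETOF(cpuid, mask);
}

/* Old pc_other_cpus: every CPU in all_cpus except this one. */
static void
other_cpus_of(u_int cpuid, const cpuset_t *all, cpuset_t *mask)
{
	*mask = *all;
	CPU_CLR(cpuid, mask);
}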
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_rmlock.c	6
-rw-r--r--	sys/kern/sched_4bsd.c	57
-rw-r--r--	sys/kern/subr_kdb.c	16
-rw-r--r--	sys/kern/subr_pcpu.c	1
-rw-r--r--	sys/kern/subr_smp.c	4
5 files changed, 41 insertions(+), 43 deletions(-)
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 3214e1b..1c7337d 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -263,7 +263,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
pc = pcpu_find(curcpu);
/* Check if we just need to do a proper critical_exit. */
- if (!CPU_OVERLAP(&pc->pc_cpumask, &rm->rm_writecpus)) {
+ if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
critical_exit();
return (1);
}
@@ -325,7 +325,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
critical_enter();
pc = pcpu_find(curcpu);
- CPU_NAND(&rm->rm_writecpus, &pc->pc_cpumask);
+ CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
rm_tracker_add(pc, tracker);
sched_pin();
critical_exit();
@@ -367,7 +367,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
* conditional jump.
*/
if (0 == (td->td_owepreempt |
- CPU_OVERLAP(&rm->rm_writecpus, &pc->pc_cpumask)))
+ CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
return (1);
/* We do not have a read token and need to acquire one. */
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 592bb80..574755f0 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -951,8 +951,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (td->td_flags & TDF_IDLETD) {
TD_SET_CAN_RUN(td);
#ifdef SMP
- /* Spinlock held here, assume no migration. */
- CPU_NAND(&idle_cpus_mask, PCPU_PTR(cpumask));
+ CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
#endif
} else {
if (TD_IS_RUNNING(td)) {
@@ -1026,7 +1025,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
#ifdef SMP
if (td->td_flags & TDF_IDLETD)
- CPU_OR(&idle_cpus_mask, PCPU_PTR(cpumask));
+ CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
#endif
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
@@ -1055,7 +1054,8 @@ static int
forward_wakeup(int cpunum)
{
struct pcpu *pc;
- cpuset_t dontuse, id, map, map2, me;
+ cpuset_t dontuse, map, map2;
+ u_int id, me;
int iscpuset;
mtx_assert(&sched_lock, MA_OWNED);
@@ -1073,27 +1073,24 @@ forward_wakeup(int cpunum)
/*
* Check the idle mask we received against what we calculated
* before in the old version.
- *
- * Also note that sched_lock is held now, thus no migration is
- * expected.
*/
- me = PCPU_GET(cpumask);
+ me = PCPU_GET(cpuid);
/* Don't bother if we should be doing it ourself. */
- if (CPU_OVERLAP(&me, &idle_cpus_mask) &&
- (cpunum == NOCPU || CPU_ISSET(cpunum, &me)))
+ if (CPU_ISSET(me, &idle_cpus_mask) &&
+ (cpunum == NOCPU || me == cpunum))
return (0);
- dontuse = me;
+ CPU_SETOF(me, &dontuse);
CPU_OR(&dontuse, &stopped_cpus);
CPU_OR(&dontuse, &hlt_cpus_mask);
CPU_ZERO(&map2);
if (forward_wakeup_use_loop) {
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
- id = pc->pc_cpumask;
- if (!CPU_OVERLAP(&id, &dontuse) &&
+ id = pc->pc_cpuid;
+ if (!CPU_ISSET(id, &dontuse) &&
pc->pc_curthread == pc->pc_idlethread) {
- CPU_OR(&map2, &id);
+ CPU_SET(id, &map2);
}
}
}
@@ -1125,11 +1122,11 @@ forward_wakeup(int cpunum)
if (!CPU_EMPTY(&map)) {
forward_wakeups_delivered++;
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
- id = pc->pc_cpumask;
- if (!CPU_OVERLAP(&map, &id))
+ id = pc->pc_cpuid;
+ if (!CPU_ISSET(id, &map))
continue;
if (cpu_idle_wakeup(pc->pc_cpuid))
- CPU_NAND(&map, &id);
+ CPU_CLR(id, &map);
}
if (!CPU_EMPTY(&map))
ipi_selected(map, IPI_AST);
@@ -1147,7 +1144,7 @@ kick_other_cpu(int pri, int cpuid)
int cpri;
pcpu = pcpu_find(cpuid);
- if (CPU_OVERLAP(&idle_cpus_mask, &pcpu->pc_cpumask)) {
+ if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
forward_wakeups_delivered++;
if (!cpu_idle_wakeup(cpuid))
ipi_cpu(cpuid, IPI_AST);
@@ -1205,10 +1202,10 @@ void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
- cpuset_t idle, me, tidlemsk;
+ cpuset_t tidlemsk;
struct td_sched *ts;
+ u_int cpu, cpuid;
int forwarded = 0;
- int cpu;
int single_cpu = 0;
ts = td->td_sched;
@@ -1271,23 +1268,17 @@ sched_add(struct thread *td, int flags)
ts->ts_runq = &runq;
}
- if (single_cpu && (cpu != PCPU_GET(cpuid))) {
+ cpuid = PCPU_GET(cpuid);
+ if (single_cpu && cpu != cpuid) {
kick_other_cpu(td->td_priority, cpu);
} else {
if (!single_cpu) {
-
- /*
- * Thread spinlock is held here, assume no
- * migration is possible.
- */
- me = PCPU_GET(cpumask);
- idle = idle_cpus_mask;
- tidlemsk = idle;
- CPU_AND(&idle, &me);
- CPU_OR(&me, &hlt_cpus_mask);
- CPU_NAND(&tidlemsk, &me);
+ tidlemsk = idle_cpus_mask;
+ CPU_NAND(&tidlemsk, &hlt_cpus_mask);
+ CPU_CLR(cpuid, &tidlemsk);
- if (CPU_EMPTY(&idle) && ((flags & SRQ_INTR) == 0) &&
+ if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
+ ((flags & SRQ_INTR) == 0) &&
!CPU_EMPTY(&tidlemsk))
forwarded = forward_wakeup(cpu);
}
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 2bc5ab2..f5cb31e 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -197,9 +197,12 @@ kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS)
void
kdb_panic(const char *msg)
{
-
#ifdef SMP
- stop_cpus_hard(PCPU_GET(other_cpus));
+ cpuset_t other_cpus;
+
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+ stop_cpus_hard(other_cpus);
#endif
printf("KDB: panic\n");
panic("%s", msg);
@@ -415,7 +418,7 @@ kdb_thr_ctx(struct thread *thr)
#if defined(SMP) && defined(KDB_STOPPEDPCB)
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc->pc_curthread == thr &&
- CPU_OVERLAP(&stopped_cpus, &pc->pc_cpumask))
+ CPU_ISSET(pc->pc_cpuid, &stopped_cpus))
return (KDB_STOPPEDPCB(pc));
}
#endif
@@ -499,6 +502,9 @@ kdb_thr_select(struct thread *thr)
int
kdb_trap(int type, int code, struct trapframe *tf)
{
+#ifdef SMP
+ cpuset_t other_cpus;
+#endif
struct kdb_dbbe *be;
register_t intr;
int handled;
@@ -514,7 +520,9 @@ kdb_trap(int type, int code, struct trapframe *tf)
intr = intr_disable();
#ifdef SMP
- stop_cpus_hard(PCPU_GET(other_cpus));
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+ stop_cpus_hard(other_cpus);
#endif
kdb_active++;
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index a6b3ae0..ec6b590 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -87,7 +87,6 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
KASSERT(cpuid >= 0 && cpuid < MAXCPU,
("pcpu_init: invalid cpuid %d", cpuid));
pcpu->pc_cpuid = cpuid;
- CPU_SETOF(cpuid, &pcpu->pc_cpumask);
cpuid_to_pcpu[cpuid] = pcpu;
STAILQ_INSERT_TAIL(&cpuhead, pcpu, pc_allcpu);
cpu_pcpu_init(pcpu, cpuid, size);
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 0929ab9..caec965 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -142,7 +142,7 @@ mp_start(void *dummy)
/* Probe for MP hardware. */
if (smp_disabled != 0 || cpu_mp_probe() == 0) {
mp_ncpus = 1;
- all_cpus = PCPU_GET(cpumask);
+ CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
return;
}
@@ -706,7 +706,7 @@ mp_setvariables_for_up(void *dummy)
{
mp_ncpus = 1;
mp_maxid = PCPU_GET(cpuid);
- all_cpus = PCPU_GET(cpumask);
+ CPU_SETOF(mp_maxid, &all_cpus);
KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
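
For completeness, the same idiom can be sanity-checked outside the
kernel. A minimal userland sketch, assuming a FreeBSD host where
<sys/cpuset.h> exposes the CPU_* macros to user programs and using
CPU_FILL() as a stand-in for the kernel's all_cpus:

#include <sys/param.h>
#include <sys/cpuset.h>
#include <assert.h>

int
main(void)
{
	cpuset_t self, others;
	u_int cpuid = 2;	/* arbitrary example CPU */

	/* Single-CPU mask, as pcpu_init() used to build pc_cpumask. */
	CPU_SETOF(cpuid, &self);
	assert(CPU_ISSET(cpuid, &self));

	/* All-but-self mask, as kdb_panic() builds other_cpus above. */
	CPU_FILL(&others);
	CPU_CLR(cpuid, &others);
	assert(!CPU_ISSET(cpuid, &others));
	return (0);
}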