author		attilio <attilio@FreeBSD.org>	2011-07-04 12:04:52 +0000
committer	attilio <attilio@FreeBSD.org>	2011-07-04 12:04:52 +0000
commit		364d0522f778b206262efce0932d6cea821879c6 (patch)
tree		f85bc4cd83b575a0c42f08d746e644f6dfc0e2ef /sys
parent		95ca970257de274a90a1a867048c5ace5acf532d (diff)
With the retirement of cpumask_t and the use of cpuset_t for representing a
mask of CPUs, pc_other_cpus and pc_cpumask become highly inefficient.

Remove them and replace their usage with simple pc_cpuid logic: at the
moment, pc_cpumask can easily be represented by (1 << pc_cpuid) and
pc_other_cpus by (all_cpus & ~(1 << pc_cpuid)).

This change is not targeted for MFC because of the struct pcpu member
removal and the dependency on the cpumask_t retirement.

MD review by:	marcel, marius, alc
Tested by:	pluknet
MD testing by:	marcel, marius, gonzo, andreast
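For reference, here is a minimal sketch of the substitution pattern applied
throughout the patch, assuming the cpuset_t macros from <sys/cpuset.h> and
the all_cpus global that the hunks below rely on; the function name is
purely illustrative and is not part of the commit.

/*
 * Illustrative only: how the removed pc_cpumask and pc_other_cpus
 * fields are rebuilt on demand from pc_cpuid and all_cpus.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

static void
example_rebuild_masks(void)
{
	cpuset_t self, other_cpus;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);

	/* Former PCPU_GET(cpumask): a set containing only this CPU. */
	CPU_SETOF(cpuid, &self);

	/* Former PCPU_GET(other_cpus): every CPU except this one. */
	other_cpus = all_cpus;
	CPU_CLR(cpuid, &other_cpus);

	/*
	 * Set-wide tests against the single-CPU mask collapse into
	 * single-bit operations, e.g. CPU_OVERLAP(&set, &self)
	 * becomes CPU_ISSET(cpuid, &set).
	 */
	if (CPU_ISSET(cpuid, &self))
		CPU_CLR(cpuid, &self);
}

Because the per-CPU sets are no longer cached in struct pcpu, callers also
lose the need to sched_pin() around reads of them, which is why many hunks
below drop the pin/unpin pairs.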
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/acpica/acpi_wakeup.c3
-rw-r--r--sys/amd64/amd64/mp_machdep.c71
-rw-r--r--sys/amd64/amd64/pmap.c70
-rw-r--r--sys/amd64/amd64/vm_machdep.c9
-rw-r--r--sys/cddl/dev/dtrace/amd64/dtrace_subr.c4
-rw-r--r--sys/cddl/dev/dtrace/i386/dtrace_subr.c4
-rw-r--r--sys/dev/xen/control/control.c9
-rw-r--r--sys/i386/i386/mp_machdep.c60
-rw-r--r--sys/i386/i386/pmap.c90
-rw-r--r--sys/i386/i386/vm_machdep.c20
-rw-r--r--sys/i386/xen/mp_machdep.c54
-rw-r--r--sys/i386/xen/pmap.c53
-rw-r--r--sys/ia64/ia64/mp_machdep.c16
-rw-r--r--sys/kern/kern_rmlock.c6
-rw-r--r--sys/kern/sched_4bsd.c57
-rw-r--r--sys/kern/subr_kdb.c16
-rw-r--r--sys/kern/subr_pcpu.c1
-rw-r--r--sys/kern/subr_smp.c4
-rw-r--r--sys/mips/mips/mp_machdep.c32
-rw-r--r--sys/mips/mips/pmap.c47
-rw-r--r--sys/powerpc/aim/mmu_oea.c8
-rw-r--r--sys/powerpc/aim/mmu_oea64.c8
-rw-r--r--sys/powerpc/booke/pmap.c12
-rw-r--r--sys/powerpc/powerpc/mp_machdep.c21
-rw-r--r--sys/sparc64/sparc64/mp_machdep.c8
-rw-r--r--sys/sys/pcpu.h12
26 files changed, 301 insertions, 394 deletions
diff --git a/sys/amd64/acpica/acpi_wakeup.c b/sys/amd64/acpica/acpi_wakeup.c
index 29e66c5..43aeec3 100644
--- a/sys/amd64/acpica/acpi_wakeup.c
+++ b/sys/amd64/acpica/acpi_wakeup.c
@@ -226,7 +226,8 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
return (ret);
#ifdef SMP
- wakeup_cpus = PCPU_GET(other_cpus);
+ wakeup_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &wakeup_cpus);
#endif
AcpiSetFirmwareWakingVector(WAKECODE_PADDR(sc));
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 53988e9..668e79c 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -604,10 +604,10 @@ cpu_mp_announce(void)
void
init_secondary(void)
{
- cpuset_t tcpuset, tallcpus;
struct pcpu *pc;
struct nmi_pcpu *np;
u_int64_t msr, cr0;
+ u_int cpuid;
int cpu, gsel_tss, x;
struct region_descriptor ap_gdt;
@@ -711,8 +711,9 @@ init_secondary(void)
fpuinit();
/* A quick check from sanity claus */
+ cpuid = PCPU_GET(cpuid);
if (PCPU_GET(apic_id) != lapic_id()) {
- printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
+ printf("SMP: cpuid = %d\n", cpuid);
printf("SMP: actual apic_id = %d\n", lapic_id());
printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
panic("cpuid mismatch! boom!!");
@@ -734,19 +735,13 @@ init_secondary(void)
smp_cpus++;
- CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
- printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
- tcpuset = PCPU_GET(cpumask);
+ CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
+ printf("SMP: AP CPU #%d Launched!\n", cpuid);
/* Determine if we are a logical CPU. */
/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
- CPU_OR(&logical_cpus_mask, &tcpuset);
-
- /* Build our map of 'other' CPUs. */
- tallcpus = all_cpus;
- CPU_NAND(&tallcpus, &tcpuset);
- PCPU_SET(other_cpus, tallcpus);
+ CPU_SET(cpuid, &logical_cpus_mask);
if (bootverbose)
lapic_dump("AP");
@@ -893,7 +888,6 @@ assign_cpu_ids(void)
static int
start_all_aps(void)
{
- cpuset_t tallcpus, tcpuset;
vm_offset_t va = boot_address + KERNBASE;
u_int64_t *pt4, *pt3, *pt2;
u_int32_t mpbioswarmvec;
@@ -961,12 +955,6 @@ start_all_aps(void)
CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
}
- /* build our map of 'other' CPUs */
- tallcpus = all_cpus;
- tcpuset = PCPU_GET(cpumask);
- CPU_NAND(&tallcpus, &tcpuset);
- PCPU_SET(other_cpus, tallcpus);
-
/* restore the warmstart vector */
*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
@@ -1150,9 +1138,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_of
if (othercpus < 1)
return;
} else {
- sched_pin();
- CPU_NAND(&mask, PCPU_PTR(cpumask));
- sched_unpin();
+ CPU_CLR(PCPU_GET(cpuid), &mask);
if (CPU_EMPTY(&mask))
return;
}
@@ -1349,11 +1335,13 @@ ipi_cpu(int cpu, u_int ipi)
void
ipi_all_but_self(u_int ipi)
{
+ cpuset_t other_cpus;
+
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
- sched_pin();
if (IPI_IS_BITMAPED(ipi)) {
- ipi_selected(PCPU_GET(other_cpus), ipi);
- sched_unpin();
+ ipi_selected(other_cpus, ipi);
return;
}
@@ -1363,8 +1351,7 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- CPU_OR_ATOMIC(&ipi_nmi_pending, PCPU_PTR(other_cpus));
- sched_unpin();
+ CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@@ -1373,7 +1360,7 @@ ipi_all_but_self(u_int ipi)
int
ipi_nmi_handler()
{
- cpuset_t cpumask;
+ u_int cpuid;
/*
* As long as there is not a simple way to know about a NMI's
@@ -1381,13 +1368,11 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
- sched_pin();
- cpumask = PCPU_GET(cpumask);
- sched_unpin();
- if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
return (1);
- CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
+ CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
cpustop_handler();
return (0);
}
@@ -1399,25 +1384,21 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
- cpuset_t cpumask;
u_int cpu;
- sched_pin();
cpu = PCPU_GET(cpuid);
- cpumask = PCPU_GET(cpumask);
- sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
- while (!CPU_OVERLAP(&started_cpus, &cpumask))
+ while (!CPU_ISSET(cpu, &started_cpus))
ia32_pause();
- CPU_NAND_ATOMIC(&started_cpus, &cpumask);
- CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
@@ -1432,19 +1413,17 @@ cpustop_handler(void)
void
cpususpend_handler(void)
{
- cpuset_t cpumask;
register_t cr3, rf;
u_int cpu;
cpu = PCPU_GET(cpuid);
- cpumask = PCPU_GET(cpumask);
rf = intr_disable();
cr3 = rcr3();
if (savectx(susppcbs[cpu])) {
wbinvd();
- CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
} else {
pmap_init_pat();
PCPU_SET(switchtime, 0);
@@ -1452,11 +1431,11 @@ cpususpend_handler(void)
}
/* Wait for resume */
- while (!CPU_OVERLAP(&started_cpus, &cpumask))
+ while (!CPU_ISSET(cpu, &started_cpus))
ia32_pause();
- CPU_NAND_ATOMIC(&started_cpus, &cpumask);
- CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
/* Restore CR3 and enable interrupts */
load_cr3(cr3);
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 3366f41..bed795b 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -925,16 +925,18 @@ pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invlpg(va);
smp_invlpg(va);
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
invlpg(va);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
@@ -946,8 +948,9 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
vm_offset_t addr;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
@@ -955,9 +958,10 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
invlpg(addr);
smp_invlpg_range(sva, eva);
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
CPU_AND(&other_cpus, &pmap->pm_active);
@@ -970,16 +974,18 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
void
pmap_invalidate_all(pmap_t pmap)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invltlb();
smp_invltlb();
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
invltlb();
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
@@ -999,11 +1005,11 @@ pmap_invalidate_cache(void)
}
struct pde_action {
- cpuset_t store; /* processor that updates the PDE */
cpuset_t invalidate; /* processors that invalidate their TLB */
vm_offset_t va;
pd_entry_t *pde;
pd_entry_t newpde;
+ u_int store; /* processor that updates the PDE */
};
static void
@@ -1011,12 +1017,8 @@ pmap_update_pde_action(void *arg)
{
struct pde_action *act = arg;
- sched_pin();
- if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
- sched_unpin();
+ if (act->store == PCPU_GET(cpuid))
pde_store(act->pde, act->newpde);
- } else
- sched_unpin();
}
static void
@@ -1024,12 +1026,8 @@ pmap_update_pde_teardown(void *arg)
{
struct pde_action *act = arg;
- sched_pin();
- if (CPU_OVERLAP(&act->invalidate, PCPU_PTR(cpumask))) {
- sched_unpin();
+ if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
pmap_update_pde_invalidate(act->va, act->newpde);
- } else
- sched_unpin();
}
/*
@@ -1044,28 +1042,30 @@ static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
struct pde_action act;
- cpuset_t active, cpumask, other_cpus;
+ cpuset_t active, other_cpus;
+ u_int cpuid;
sched_pin();
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
if (pmap == kernel_pmap)
active = all_cpus;
else
active = pmap->pm_active;
if (CPU_OVERLAP(&active, &other_cpus)) {
- act.store = cpumask;
+ act.store = cpuid;
act.invalidate = active;
act.va = va;
act.pde = pde;
act.newpde = newpde;
- CPU_OR(&cpumask, &active);
- smp_rendezvous_cpus(cpumask,
+ CPU_SET(cpuid, &active);
+ smp_rendezvous_cpus(active,
smp_no_rendevous_barrier, pmap_update_pde_action,
pmap_update_pde_teardown, &act);
} else {
pde_store(pde, newpde);
- if (CPU_OVERLAP(&active, &cpumask))
+ if (CPU_ISSET(cpuid, &active))
pmap_update_pde_invalidate(va, newpde);
}
sched_unpin();
@@ -5095,17 +5095,19 @@ void
pmap_activate(struct thread *td)
{
pmap_t pmap, oldpmap;
+ u_int cpuid;
u_int64_t cr3;
critical_enter();
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
+ cpuid = PCPU_GET(cpuid);
#ifdef SMP
- CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+ CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
- CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR(cpuid, &oldpmap->pm_active);
+ CPU_SET(cpuid, &pmap->pm_active);
#endif
cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
td->td_pcb->pcb_cr3 = cr3;
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 13f5cd0..d05880f 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -59,7 +59,6 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
-#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
@@ -535,8 +534,8 @@ cpu_reset()
u_int cnt;
if (smp_active) {
- sched_pin();
- map = PCPU_GET(other_cpus);
+ map = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &map);
CPU_NAND(&map, &stopped_cpus);
if (!CPU_EMPTY(&map)) {
printf("cpu_reset: Stopping other CPUs\n");
@@ -545,7 +544,6 @@ cpu_reset()
if (PCPU_GET(cpuid) != 0) {
cpu_reset_proxyid = PCPU_GET(cpuid);
- sched_unpin();
cpustop_restartfunc = cpu_reset_proxy;
cpu_reset_proxy_active = 0;
printf("cpu_reset: Restarting BSP\n");
@@ -564,8 +562,7 @@ cpu_reset()
while (1);
/* NOTREACHED */
- } else
- sched_unpin();
+ }
DELAY(1000000);
}
diff --git a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
index 0b86eac..d2a1cf1 100644
--- a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
@@ -412,8 +412,8 @@ dtrace_gethrtime_init(void *arg)
continue;
pc = pcpu_find(i);
- map = PCPU_GET(cpumask);
- CPU_OR(&map, &pc->pc_cpumask);
+ CPU_SETOF(PCPU_GET(cpuid), &map);
+ CPU_SET(pc->pc_cpuid, &map);
smp_rendezvous_cpus(map, NULL,
dtrace_gethrtime_init_cpu,
diff --git a/sys/cddl/dev/dtrace/i386/dtrace_subr.c b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
index 412fc38..b52097b 100644
--- a/sys/cddl/dev/dtrace/i386/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
@@ -413,8 +413,8 @@ dtrace_gethrtime_init(void *arg)
continue;
pc = pcpu_find(i);
- map = PCPU_GET(cpumask);
- CPU_OR(&map, &pc->pc_cpumask);
+ CPU_SETOF(PCPU_GET(cpuid), &map);
+ CPU_SET(pc->pc_cpuid, &map);
smp_rendezvous_cpus(map, NULL,
dtrace_gethrtime_init_cpu,
diff --git a/sys/dev/xen/control/control.c b/sys/dev/xen/control/control.c
index bc59fa0..301d4e8 100644
--- a/sys/dev/xen/control/control.c
+++ b/sys/dev/xen/control/control.c
@@ -197,6 +197,7 @@ extern void xencons_resume(void);
static void
xctrl_suspend()
{
+ u_int cpuid;
int i, j, k, fpp;
unsigned long max_pfn, start_info_mfn;
@@ -210,11 +211,11 @@ xctrl_suspend()
thread_lock(td);
sched_bind(td, 0);
thread_unlock(td);
- KASSERT(PCPU_GET(cpuid) == 0, ("xen_suspend: not running on cpu 0"));
+ cpuid = PCPU_GET(cpuid);
+ KASSERT(cpuid == 0, ("xen_suspend: not running on cpu 0"));
- sched_pin();
- map = PCPU_GET(other_cpus);
- sched_unpin();
+ map = all_cpus;
+ CPU_CLR(cpuid, &map);
CPU_NAND(&map, &stopped_cpus);
if (!CPU_EMPTY(&map))
stop_cpus(map);
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 78c90c0..0900901 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -658,12 +658,11 @@ cpu_mp_announce(void)
void
init_secondary(void)
{
- cpuset_t tcpuset, tallcpus;
struct pcpu *pc;
vm_offset_t addr;
int gsel_tss;
int x, myid;
- u_int cr0;
+ u_int cpuid, cr0;
/* bootAP is set in start_ap() to our ID. */
myid = bootAP;
@@ -758,8 +757,9 @@ init_secondary(void)
#endif
/* A quick check from sanity claus */
+ cpuid = PCPU_GET(cpuid);
if (PCPU_GET(apic_id) != lapic_id()) {
- printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
+ printf("SMP: cpuid = %d\n", cpuid);
printf("SMP: actual apic_id = %d\n", lapic_id());
printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
panic("cpuid mismatch! boom!!");
@@ -781,19 +781,13 @@ init_secondary(void)
smp_cpus++;
- CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
- printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
- tcpuset = PCPU_GET(cpumask);
+ CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
+ printf("SMP: AP CPU #%d Launched!\n", cpuid);
/* Determine if we are a logical CPU. */
/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
- CPU_OR(&logical_cpus_mask, &tcpuset);
-
- /* Build our map of 'other' CPUs. */
- tallcpus = all_cpus;
- CPU_NAND(&tallcpus, &tcpuset);
- PCPU_SET(other_cpus, tallcpus);
+ CPU_SET(cpuid, &logical_cpus_mask);
if (bootverbose)
lapic_dump("AP");
@@ -934,7 +928,6 @@ assign_cpu_ids(void)
static int
start_all_aps(void)
{
- cpuset_t tallcpus;
#ifndef PC98
u_char mpbiosreason;
#endif
@@ -997,11 +990,6 @@ start_all_aps(void)
CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
}
- /* build our map of 'other' CPUs */
- tallcpus = all_cpus;
- CPU_NAND(&tallcpus, PCPU_PTR(cpumask));
- PCPU_SET(other_cpus, tallcpus);
-
/* restore the warmstart vector */
*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
@@ -1253,9 +1241,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_of
if (othercpus < 1)
return;
} else {
- sched_pin();
- CPU_NAND(&mask, PCPU_PTR(cpumask));
- sched_unpin();
+ CPU_CLR(PCPU_GET(cpuid), &mask);
if (CPU_EMPTY(&mask))
return;
}
@@ -1452,11 +1438,12 @@ ipi_cpu(int cpu, u_int ipi)
void
ipi_all_but_self(u_int ipi)
{
+ cpuset_t other_cpus;
- sched_pin();
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
if (IPI_IS_BITMAPED(ipi)) {
- ipi_selected(PCPU_GET(other_cpus), ipi);
- sched_unpin();
+ ipi_selected(other_cpus, ipi);
return;
}
@@ -1466,8 +1453,7 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- CPU_OR_ATOMIC(&ipi_nmi_pending, PCPU_PTR(other_cpus));
- sched_unpin();
+ CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@@ -1476,7 +1462,7 @@ ipi_all_but_self(u_int ipi)
int
ipi_nmi_handler()
{
- cpuset_t cpumask;
+ u_int cpuid;
/*
* As long as there is not a simple way to know about a NMI's
@@ -1484,13 +1470,11 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
- sched_pin();
- cpumask = PCPU_GET(cpumask);
- sched_unpin();
- if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
return (1);
- CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
+ CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
cpustop_handler();
return (0);
}
@@ -1502,25 +1486,21 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
- cpuset_t cpumask;
u_int cpu;
- sched_pin();
cpu = PCPU_GET(cpuid);
- cpumask = PCPU_GET(cpumask);
- sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
- while (!CPU_OVERLAP(&started_cpus, &cpumask))
+ while (!CPU_ISSET(cpu, &started_cpus))
ia32_pause();
- CPU_NAND_ATOMIC(&started_cpus, &cpumask);
- CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index e63cddd..002e529 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -932,16 +932,18 @@ pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invlpg(va);
smp_invlpg(va);
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
invlpg(va);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
@@ -953,8 +955,9 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
vm_offset_t addr;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
@@ -962,9 +965,10 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
invlpg(addr);
smp_invlpg_range(sva, eva);
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
CPU_AND(&other_cpus, &pmap->pm_active);
@@ -977,16 +981,18 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
void
pmap_invalidate_all(pmap_t pmap)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invltlb();
smp_invltlb();
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
invltlb();
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
@@ -1006,11 +1012,11 @@ pmap_invalidate_cache(void)
}
struct pde_action {
- cpuset_t store; /* processor that updates the PDE */
cpuset_t invalidate; /* processors that invalidate their TLB */
vm_offset_t va;
pd_entry_t *pde;
pd_entry_t newpde;
+ u_int store; /* processor that updates the PDE */
};
static void
@@ -1020,9 +1026,7 @@ pmap_update_pde_kernel(void *arg)
pd_entry_t *pde;
pmap_t pmap;
- sched_pin();
- if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
- sched_unpin();
+ if (act->store == PCPU_GET(cpuid)) {
/*
* Elsewhere, this operation requires allpmaps_lock for
@@ -1033,8 +1037,7 @@ pmap_update_pde_kernel(void *arg)
pde = pmap_pde(pmap, act->va);
pde_store(pde, act->newpde);
}
- } else
- sched_unpin();
+ }
}
static void
@@ -1042,12 +1045,8 @@ pmap_update_pde_user(void *arg)
{
struct pde_action *act = arg;
- sched_pin();
- if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
- sched_unpin();
+ if (act->store == PCPU_GET(cpuid))
pde_store(act->pde, act->newpde);
- } else
- sched_unpin();
}
static void
@@ -1055,12 +1054,8 @@ pmap_update_pde_teardown(void *arg)
{
struct pde_action *act = arg;
- sched_pin();
- if (CPU_OVERLAP(&act->invalidate, PCPU_PTR(cpumask))) {
- sched_unpin();
+ if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
pmap_update_pde_invalidate(act->va, act->newpde);
- } else
- sched_unpin();
}
/*
@@ -1075,23 +1070,25 @@ static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
struct pde_action act;
- cpuset_t active, cpumask, other_cpus;
+ cpuset_t active, other_cpus;
+ u_int cpuid;
sched_pin();
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
if (pmap == kernel_pmap)
active = all_cpus;
else
active = pmap->pm_active;
if (CPU_OVERLAP(&active, &other_cpus)) {
- act.store = cpumask;
+ act.store = cpuid;
act.invalidate = active;
act.va = va;
act.pde = pde;
act.newpde = newpde;
- CPU_OR(&cpumask, &active);
- smp_rendezvous_cpus(cpumask,
+ CPU_SET(cpuid, &active);
+ smp_rendezvous_cpus(active,
smp_no_rendevous_barrier, pmap == kernel_pmap ?
pmap_update_pde_kernel : pmap_update_pde_user,
pmap_update_pde_teardown, &act);
@@ -1100,7 +1097,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
pmap_kenter_pde(va, newpde);
else
pde_store(pde, newpde);
- if (CPU_OVERLAP(&active, &cpumask))
+ if (CPU_ISSET(cpuid, &active))
pmap_update_pde_invalidate(va, newpde);
}
sched_unpin();
@@ -1925,12 +1922,12 @@ pmap_lazyfix_action(void)
}
static void
-pmap_lazyfix_self(cpuset_t mymask)
+pmap_lazyfix_self(u_int cpuid)
{
if (rcr3() == lazyptd)
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- CPU_NAND_ATOMIC(lazymask, &mymask);
+ CPU_CLR_ATOMIC(cpuid, lazymask);
}
@@ -1938,7 +1935,7 @@ static void
pmap_lazyfix(pmap_t pmap)
{
cpuset_t mymask, mask;
- u_int spins;
+ u_int cpuid, spins;
int lsb;
mask = pmap->pm_active;
@@ -1956,10 +1953,13 @@ pmap_lazyfix(pmap_t pmap)
#else
lazyptd = vtophys(pmap->pm_pdir);
#endif
- mymask = PCPU_GET(cpumask);
+ cpuid = PCPU_GET(cpuid);
+
+ /* Use a cpuset just for having an easy check. */
+ CPU_SETOF(cpuid, &mymask);
if (!CPU_CMP(&mask, &mymask)) {
lazymask = &pmap->pm_active;
- pmap_lazyfix_self(mymask);
+ pmap_lazyfix_self(cpuid);
} else {
atomic_store_rel_int((u_int *)&lazymask,
(u_int)&pmap->pm_active);
@@ -5098,17 +5098,19 @@ void
pmap_activate(struct thread *td)
{
pmap_t pmap, oldpmap;
+ u_int cpuid;
u_int32_t cr3;
critical_enter();
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
+ cpuid = PCPU_GET(cpuid);
#if defined(SMP)
- CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+ CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
- CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR(cpuid, &oldpmap->pm_active);
+ CPU_SET(cpuid, &pmap->pm_active);
#endif
#ifdef PAE
cr3 = vtophys(pmap->pm_pdpt);
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index a084e09..0a7bc21 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -602,8 +602,8 @@ cpu_reset()
u_int cnt;
if (smp_active) {
- sched_pin();
- map = PCPU_GET(other_cpus);
+ map = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &map);
CPU_NAND(&map, &stopped_cpus);
if (!CPU_EMPTY(&map)) {
printf("cpu_reset: Stopping other CPUs\n");
@@ -612,7 +612,6 @@ cpu_reset()
if (PCPU_GET(cpuid) != 0) {
cpu_reset_proxyid = PCPU_GET(cpuid);
- sched_unpin();
cpustop_restartfunc = cpu_reset_proxy;
cpu_reset_proxy_active = 0;
printf("cpu_reset: Restarting BSP\n");
@@ -632,8 +631,7 @@ cpu_reset()
while (1);
/* NOTREACHED */
- } else
- sched_unpin();
+ }
DELAY(1000000);
}
@@ -802,7 +800,8 @@ sf_buf_alloc(struct vm_page *m, int flags)
struct sf_head *hash_list;
struct sf_buf *sf;
#ifdef SMP
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
#endif
int error;
@@ -877,13 +876,14 @@ sf_buf_alloc(struct vm_page *m, int flags)
CPU_ZERO(&sf->cpumask);
shootdown:
sched_pin();
- cpumask = PCPU_GET(cpumask);
- if (!CPU_OVERLAP(&cpumask, &sf->cpumask)) {
- CPU_OR(&sf->cpumask, &cpumask);
+ cpuid = PCPU_GET(cpuid);
+ if (!CPU_ISSET(cpuid, &sf->cpumask)) {
+ CPU_SET(cpuid, &sf->cpumask);
invlpg(sf->kva);
}
if ((flags & SFB_CPUPRIVATE) == 0) {
- other_cpus = PCPU_GET(other_cpus);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
CPU_NAND(&other_cpus, &sf->cpumask);
if (!CPU_EMPTY(&other_cpus)) {
CPU_OR(&sf->cpumask, &other_cpus);
diff --git a/sys/i386/xen/mp_machdep.c b/sys/i386/xen/mp_machdep.c
index 2d05596..a0ef1e8 100644
--- a/sys/i386/xen/mp_machdep.c
+++ b/sys/i386/xen/mp_machdep.c
@@ -523,8 +523,8 @@ xen_smp_intr_init_cpus(void *unused)
void
init_secondary(void)
{
- cpuset_t tcpuset, tallcpus;
vm_offset_t addr;
+ u_int cpuid;
int gsel_tss;
@@ -601,23 +601,18 @@ init_secondary(void)
#endif
smp_cpus++;
- CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
- printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
- tcpuset = PCPU_GET(cpumask);
+ cpuid = PCPU_GET(cpuid);
+ CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
+ printf("SMP: AP CPU #%d Launched!\n", cpuid);
/* Determine if we are a logical CPU. */
if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
- CPU_OR(&logical_cpus_mask, &tcpuset);
+ CPU_SET(cpuid, &logical_cpus_mask);
/* Determine if we are a hyperthread. */
if (hyperthreading_cpus > 1 &&
PCPU_GET(apic_id) % hyperthreading_cpus != 0)
- CPU_OR(&hyperthreading_cpus_mask, &tcpuset);
-
- /* Build our map of 'other' CPUs. */
- tallcpus = all_cpus;
- CPU_NAND(&tallcpus, &tcpuset);
- PCPU_SET(other_cpus, tallcpus);
+ CPU_SET(cpuid, &hyperthreading_cpus_mask);
#if 0
if (bootverbose)
lapic_dump("AP");
@@ -731,7 +726,6 @@ assign_cpu_ids(void)
int
start_all_aps(void)
{
- cpuset_t tallcpus;
int x,apic_id, cpu;
struct pcpu *pc;
@@ -789,11 +783,6 @@ start_all_aps(void)
}
- /* build our map of 'other' CPUs */
- tallcpus = all_cpus;
- CPU_NAND(&tallcpus, PCPU_PTR(cpumask));
- PCPU_SET(other_cpus, tallcpus);
-
pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
/* number of APs actually started */
@@ -1031,9 +1020,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_of
if (othercpus < 1)
return;
} else {
- critical_enter();
- CPU_NAND(&mask, PCPU_PTR(cpumask));
- critical_exit();
+ CPU_CLR(PCPU_GET(cpuid), &mask);
if (CPU_EMPTY(&mask))
return;
}
@@ -1184,9 +1171,8 @@ ipi_all_but_self(u_int ipi)
* of help in order to understand what is the source.
* Set the mask of receiving CPUs for this purpose.
*/
- sched_pin();
- other_cpus = PCPU_GET(other_cpus);
- sched_unpin();
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
if (ipi == IPI_STOP_HARD)
CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
@@ -1197,7 +1183,7 @@ ipi_all_but_self(u_int ipi)
int
ipi_nmi_handler()
{
- cpuset_t cpumask;
+ u_int cpuid;
/*
* As long as there is not a simple way to know about a NMI's
@@ -1205,13 +1191,11 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
- sched_pin();
- cpumask = PCPU_GET(cpumask);
- sched_unpin();
- if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
return (1);
- CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
+ CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
cpustop_handler();
return (0);
}
@@ -1223,25 +1207,21 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
- cpuset_t cpumask;
int cpu;
- sched_pin();
- cpumask = PCPU_GET(cpumask);
cpu = PCPU_GET(cpuid);
- sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
- while (!CPU_OVERLAP(&started_cpus, &cpumask))
+ while (!CPU_ISSET(cpu, &started_cpus))
ia32_pause();
- CPU_NAND_ATOMIC(&started_cpus, &cpumask);
- CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 87bc0a0..b5e71da 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -802,7 +802,8 @@ pmap_cache_bits(int mode, boolean_t is_pde)
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
pmap, va);
@@ -812,9 +813,10 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
invlpg(va);
smp_invlpg(va);
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
invlpg(va);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
@@ -827,8 +829,9 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
vm_offset_t addr;
+ u_int cpuid;
CTR3(KTR_PMAP, "pmap_invalidate_page: pmap=%p eva=0x%x sva=0x%x",
pmap, sva, eva);
@@ -839,9 +842,10 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
invlpg(addr);
smp_invlpg_range(sva, eva);
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
CPU_AND(&other_cpus, &pmap->pm_active);
@@ -855,7 +859,8 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
void
pmap_invalidate_all(pmap_t pmap)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
CTR1(KTR_PMAP, "pmap_invalidate_page: pmap=%p", pmap);
@@ -864,9 +869,10 @@ pmap_invalidate_all(pmap_t pmap)
invltlb();
smp_invltlb();
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
invltlb();
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
@@ -1708,12 +1714,12 @@ pmap_lazyfix_action(void)
}
static void
-pmap_lazyfix_self(cpuset_t mymask)
+pmap_lazyfix_self(u_int cpuid)
{
if (rcr3() == lazyptd)
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- CPU_NAND_ATOMIC(lazymask, &mymask);
+ CPU_CLR_ATOMIC(cpuid, lazymask);
}
@@ -1721,7 +1727,7 @@ static void
pmap_lazyfix(pmap_t pmap)
{
cpuset_t mymask, mask;
- u_int spins;
+ u_int cpuid, spins;
int lsb;
mask = pmap->pm_active;
@@ -1739,10 +1745,13 @@ pmap_lazyfix(pmap_t pmap)
#else
lazyptd = vtophys(pmap->pm_pdir);
#endif
- mymask = PCPU_GET(cpumask);
+ cpuid = PCPU_GET(cpuid);
+
+ /* Use a cpuset just for having an easy check. */
+ CPU_SETOF(cpuid, &mymask);
if (!CPU_CMP(&mask, &mymask)) {
lazymask = &pmap->pm_active;
- pmap_lazyfix_self(mymask);
+ pmap_lazyfix_self(cpuid);
} else {
atomic_store_rel_int((u_int *)&lazymask,
(u_int)&pmap->pm_active);
@@ -4126,17 +4135,19 @@ void
pmap_activate(struct thread *td)
{
pmap_t pmap, oldpmap;
+ u_int cpuid;
u_int32_t cr3;
critical_enter();
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
+ cpuid = PCPU_GET(cpuid);
#if defined(SMP)
- CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+ CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
- CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR(cpuid, &oldpmap->pm_active);
+ CPU_SET(cpuid, &pmap->pm_active);
#endif
#ifdef PAE
cr3 = vtophys(pmap->pm_pdpt);
diff --git a/sys/ia64/ia64/mp_machdep.c b/sys/ia64/ia64/mp_machdep.c
index 59b14d0..0d8f241 100644
--- a/sys/ia64/ia64/mp_machdep.c
+++ b/sys/ia64/ia64/mp_machdep.c
@@ -150,18 +150,18 @@ ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
static u_int
ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
{
- cpuset_t mybit;
+ u_int cpuid;
PCPU_INC(md.stats.pcs_nstops);
- mybit = PCPU_GET(cpumask);
+ cpuid = PCPU_GET(cpuid);
savectx(PCPU_PTR(md.pcb));
- CPU_OR_ATOMIC(&stopped_cpus, &mybit);
- while (!CPU_OVERLAP(&started_cpus, &mybit))
+ CPU_SET_ATOMIC(cpuid, &stopped_cpus);
+ while (!CPU_ISSET(cpuid, &started_cpus))
cpu_spinwait();
- CPU_NAND_ATOMIC(&started_cpus, &mybit);
- CPU_NAND_ATOMIC(&stopped_cpus, &mybit);
+ CPU_CLR_ATOMIC(cpuid, &started_cpus);
+ CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
return (0);
}
@@ -371,8 +371,6 @@ cpu_mp_start()
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
pc->pc_md.current_pmap = kernel_pmap;
- pc->pc_other_cpus = all_cpus;
- CPU_NAND(&pc->pc_other_cpus, &pc->pc_cpumask);
/* The BSP is obviously running already. */
if (pc->pc_cpuid == 0) {
pc->pc_md.awake = 1;
@@ -478,7 +476,7 @@ ipi_selected(cpuset_t cpus, int ipi)
struct pcpu *pc;
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
- if (CPU_OVERLAP(&cpus, &pc->pc_cpumask))
+ if (CPU_ISSET(pc->pc_cpuid, &cpus))
ipi_send(pc, ipi);
}
}
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 3214e1b..1c7337d 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -263,7 +263,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
pc = pcpu_find(curcpu);
/* Check if we just need to do a proper critical_exit. */
- if (!CPU_OVERLAP(&pc->pc_cpumask, &rm->rm_writecpus)) {
+ if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
critical_exit();
return (1);
}
@@ -325,7 +325,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
critical_enter();
pc = pcpu_find(curcpu);
- CPU_NAND(&rm->rm_writecpus, &pc->pc_cpumask);
+ CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
rm_tracker_add(pc, tracker);
sched_pin();
critical_exit();
@@ -367,7 +367,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
* conditional jump.
*/
if (0 == (td->td_owepreempt |
- CPU_OVERLAP(&rm->rm_writecpus, &pc->pc_cpumask)))
+ CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
return (1);
/* We do not have a read token and need to acquire one. */
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 592bb80..574755f0 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -951,8 +951,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (td->td_flags & TDF_IDLETD) {
TD_SET_CAN_RUN(td);
#ifdef SMP
- /* Spinlock held here, assume no migration. */
- CPU_NAND(&idle_cpus_mask, PCPU_PTR(cpumask));
+ CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
#endif
} else {
if (TD_IS_RUNNING(td)) {
@@ -1026,7 +1025,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
#ifdef SMP
if (td->td_flags & TDF_IDLETD)
- CPU_OR(&idle_cpus_mask, PCPU_PTR(cpumask));
+ CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
#endif
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
@@ -1055,7 +1054,8 @@ static int
forward_wakeup(int cpunum)
{
struct pcpu *pc;
- cpuset_t dontuse, id, map, map2, me;
+ cpuset_t dontuse, map, map2;
+ u_int id, me;
int iscpuset;
mtx_assert(&sched_lock, MA_OWNED);
@@ -1073,27 +1073,24 @@ forward_wakeup(int cpunum)
/*
* Check the idle mask we received against what we calculated
* before in the old version.
- *
- * Also note that sched_lock is held now, thus no migration is
- * expected.
*/
- me = PCPU_GET(cpumask);
+ me = PCPU_GET(cpuid);
/* Don't bother if we should be doing it ourself. */
- if (CPU_OVERLAP(&me, &idle_cpus_mask) &&
- (cpunum == NOCPU || CPU_ISSET(cpunum, &me)))
+ if (CPU_ISSET(me, &idle_cpus_mask) &&
+ (cpunum == NOCPU || me == cpunum))
return (0);
- dontuse = me;
+ CPU_SETOF(me, &dontuse);
CPU_OR(&dontuse, &stopped_cpus);
CPU_OR(&dontuse, &hlt_cpus_mask);
CPU_ZERO(&map2);
if (forward_wakeup_use_loop) {
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
- id = pc->pc_cpumask;
- if (!CPU_OVERLAP(&id, &dontuse) &&
+ id = pc->pc_cpuid;
+ if (!CPU_ISSET(id, &dontuse) &&
pc->pc_curthread == pc->pc_idlethread) {
- CPU_OR(&map2, &id);
+ CPU_SET(id, &map2);
}
}
}
@@ -1125,11 +1122,11 @@ forward_wakeup(int cpunum)
if (!CPU_EMPTY(&map)) {
forward_wakeups_delivered++;
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
- id = pc->pc_cpumask;
- if (!CPU_OVERLAP(&map, &id))
+ id = pc->pc_cpuid;
+ if (!CPU_ISSET(id, &map))
continue;
if (cpu_idle_wakeup(pc->pc_cpuid))
- CPU_NAND(&map, &id);
+ CPU_CLR(id, &map);
}
if (!CPU_EMPTY(&map))
ipi_selected(map, IPI_AST);
@@ -1147,7 +1144,7 @@ kick_other_cpu(int pri, int cpuid)
int cpri;
pcpu = pcpu_find(cpuid);
- if (CPU_OVERLAP(&idle_cpus_mask, &pcpu->pc_cpumask)) {
+ if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
forward_wakeups_delivered++;
if (!cpu_idle_wakeup(cpuid))
ipi_cpu(cpuid, IPI_AST);
@@ -1205,10 +1202,10 @@ void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
- cpuset_t idle, me, tidlemsk;
+ cpuset_t tidlemsk;
struct td_sched *ts;
+ u_int cpu, cpuid;
int forwarded = 0;
- int cpu;
int single_cpu = 0;
ts = td->td_sched;
@@ -1271,23 +1268,17 @@ sched_add(struct thread *td, int flags)
ts->ts_runq = &runq;
}
- if (single_cpu && (cpu != PCPU_GET(cpuid))) {
+ cpuid = PCPU_GET(cpuid);
+ if (single_cpu && cpu != cpuid) {
kick_other_cpu(td->td_priority, cpu);
} else {
if (!single_cpu) {
+ tidlemsk = idle_cpus_mask;
+ CPU_NAND(&tidlemsk, &hlt_cpus_mask);
+ CPU_CLR(cpuid, &tidlemsk);
- /*
- * Thread spinlock is held here, assume no
- * migration is possible.
- */
- me = PCPU_GET(cpumask);
- idle = idle_cpus_mask;
- tidlemsk = idle;
- CPU_AND(&idle, &me);
- CPU_OR(&me, &hlt_cpus_mask);
- CPU_NAND(&tidlemsk, &me);
-
- if (CPU_EMPTY(&idle) && ((flags & SRQ_INTR) == 0) &&
+ if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
+ ((flags & SRQ_INTR) == 0) &&
!CPU_EMPTY(&tidlemsk))
forwarded = forward_wakeup(cpu);
}
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 2bc5ab2..f5cb31e 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -197,9 +197,12 @@ kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS)
void
kdb_panic(const char *msg)
{
-
#ifdef SMP
- stop_cpus_hard(PCPU_GET(other_cpus));
+ cpuset_t other_cpus;
+
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+ stop_cpus_hard(other_cpus);
#endif
printf("KDB: panic\n");
panic("%s", msg);
@@ -415,7 +418,7 @@ kdb_thr_ctx(struct thread *thr)
#if defined(SMP) && defined(KDB_STOPPEDPCB)
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc->pc_curthread == thr &&
- CPU_OVERLAP(&stopped_cpus, &pc->pc_cpumask))
+ CPU_ISSET(pc->pc_cpuid, &stopped_cpus))
return (KDB_STOPPEDPCB(pc));
}
#endif
@@ -499,6 +502,9 @@ kdb_thr_select(struct thread *thr)
int
kdb_trap(int type, int code, struct trapframe *tf)
{
+#ifdef SMP
+ cpuset_t other_cpus;
+#endif
struct kdb_dbbe *be;
register_t intr;
int handled;
@@ -514,7 +520,9 @@ kdb_trap(int type, int code, struct trapframe *tf)
intr = intr_disable();
#ifdef SMP
- stop_cpus_hard(PCPU_GET(other_cpus));
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+ stop_cpus_hard(other_cpus);
#endif
kdb_active++;
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index a6b3ae0..ec6b590 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -87,7 +87,6 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
KASSERT(cpuid >= 0 && cpuid < MAXCPU,
("pcpu_init: invalid cpuid %d", cpuid));
pcpu->pc_cpuid = cpuid;
- CPU_SETOF(cpuid, &pcpu->pc_cpumask);
cpuid_to_pcpu[cpuid] = pcpu;
STAILQ_INSERT_TAIL(&cpuhead, pcpu, pc_allcpu);
cpu_pcpu_init(pcpu, cpuid, size);
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 0929ab9..caec965 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -142,7 +142,7 @@ mp_start(void *dummy)
/* Probe for MP hardware. */
if (smp_disabled != 0 || cpu_mp_probe() == 0) {
mp_ncpus = 1;
- all_cpus = PCPU_GET(cpumask);
+ CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
return;
}
@@ -706,7 +706,7 @@ mp_setvariables_for_up(void *dummy)
{
mp_ncpus = 1;
mp_maxid = PCPU_GET(cpuid);
- all_cpus = PCPU_GET(cpumask);
+ CPU_SETOF(mp_maxid, &all_cpus);
KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
diff --git a/sys/mips/mips/mp_machdep.c b/sys/mips/mips/mp_machdep.c
index 79a3476..2298124 100644
--- a/sys/mips/mips/mp_machdep.c
+++ b/sys/mips/mips/mp_machdep.c
@@ -75,8 +75,11 @@ ipi_send(struct pcpu *pc, int ipi)
void
ipi_all_but_self(int ipi)
{
+ cpuset_t other_cpus;
- ipi_selected(PCPU_GET(other_cpus), ipi);
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+ ipi_selected(other_cpus, ipi);
}
/* Send an IPI to a set of cpus. */
@@ -86,7 +89,7 @@ ipi_selected(cpuset_t cpus, int ipi)
struct pcpu *pc;
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
- if (CPU_OVERLAP(&cpus, &pc->pc_cpumask)) {
+ if (CPU_ISSET(pc->pc_cpuid, &cpus)) {
CTR3(KTR_SMP, "%s: pc: %p, ipi: %x\n", __func__, pc,
ipi);
ipi_send(pc, ipi);
@@ -109,13 +112,10 @@ ipi_cpu(int cpu, u_int ipi)
static int
mips_ipi_handler(void *arg)
{
- int cpu;
- cpuset_t cpumask;
- u_int ipi, ipi_bitmap;
+ u_int cpu, ipi, ipi_bitmap;
int bit;
cpu = PCPU_GET(cpuid);
- cpumask = PCPU_GET(cpumask);
platform_ipi_clear(); /* quiesce the pending ipi interrupt */
@@ -150,14 +150,14 @@ mips_ipi_handler(void *arg)
tlb_save();
/* Indicate we are stopped */
- CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
- while (!CPU_OVERLAP(&started_cpus, &cpumask))
+ while (!CPU_ISSET(cpu, &started_cpus))
cpu_spinwait();
- CPU_NAND_ATOMIC(&started_cpus, &cpumask);
- CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
CTR0(KTR_SMP, "IPI_STOP (restart)");
break;
case IPI_PREEMPT:
@@ -243,7 +243,7 @@ void
cpu_mp_start(void)
{
int error, cpuid;
- cpuset_t cpumask, ocpus;
+ cpuset_t cpumask;
mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
@@ -269,16 +269,11 @@ cpu_mp_start(void)
}
CPU_SET(cpuid, &all_cpus);
}
-
- ocpus = all_cpus;
- CPU_CLR(PCPU_GET(cpuid), &ocpus);
- PCPU_SET(other_cpus, ocpus);
}
void
smp_init_secondary(u_int32_t cpuid)
{
- cpuset_t ocpus;
/* TLB */
mips_wr_wired(0);
@@ -316,11 +311,6 @@ smp_init_secondary(u_int32_t cpuid)
CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));
- /* Build our map of 'other' CPUs. */
- ocpus = all_cpus;
- CPU_CLR(PCPU_GET(cpuid), &ocpus);
- PCPU_SET(other_cpus, ocpus);
-
if (bootverbose)
printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index f05bffa..4d46099 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -625,19 +625,18 @@ pmap_init(void)
static __inline void
pmap_invalidate_all_local(pmap_t pmap)
{
+ u_int cpuid;
+
+ cpuid = PCPU_GET(cpuid);
if (pmap == kernel_pmap) {
tlb_invalidate_all();
return;
}
- sched_pin();
- if (CPU_OVERLAP(&pmap->pm_active, PCPU_PTR(cpumask))) {
- sched_unpin();
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
tlb_invalidate_all_user(pmap);
- } else {
- sched_unpin();
- pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
- }
+ else
+ pmap->pm_asid[cpuid].gen = 0;
}
#ifdef SMP
@@ -666,21 +665,20 @@ pmap_invalidate_all(pmap_t pmap)
static __inline void
pmap_invalidate_page_local(pmap_t pmap, vm_offset_t va)
{
+ u_int cpuid;
+
+ cpuid = PCPU_GET(cpuid);
if (is_kernel_pmap(pmap)) {
tlb_invalidate_address(pmap, va);
return;
}
- sched_pin();
- if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
- sched_unpin();
+ if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
return;
- } else if (!CPU_OVERLAP(&pmap->pm_active, PCPU_PTR(cpumask))) {
- pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
- sched_unpin();
+ else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
+ pmap->pm_asid[cpuid].gen = 0;
return;
}
- sched_unpin();
tlb_invalidate_address(pmap, va);
}
@@ -719,21 +717,20 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
static __inline void
pmap_update_page_local(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{
+ u_int cpuid;
+
+ cpuid = PCPU_GET(cpuid);
if (is_kernel_pmap(pmap)) {
tlb_update(pmap, va, pte);
return;
}
- sched_pin();
- if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
- sched_unpin();
+ if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
return;
- } else if (!CPU_OVERLAP(&pmap->pm_active, PCPU_PTR(cpumask))) {
- pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
- sched_unpin();
+ else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
+ pmap->pm_asid[cpuid].gen = 0;
return;
}
- sched_unpin();
tlb_update(pmap, va, pte);
}
@@ -2953,19 +2950,21 @@ pmap_activate(struct thread *td)
{
pmap_t pmap, oldpmap;
struct proc *p = td->td_proc;
+ u_int cpuid;
critical_enter();
pmap = vmspace_pmap(p->p_vmspace);
oldpmap = PCPU_GET(curpmap);
+ cpuid = PCPU_GET(cpuid);
if (oldpmap)
- CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+ CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
pmap_asid_alloc(pmap);
if (td == curthread) {
PCPU_SET(segbase, pmap->pm_segtab);
- mips_wr_entryhi(pmap->pm_asid[PCPU_GET(cpuid)].asid);
+ mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
}
PCPU_SET(curpmap, pmap);
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index be80455..23354f9 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -945,9 +945,7 @@ moea_activate(mmu_t mmu, struct thread *td)
pm = &td->td_proc->p_vmspace->vm_pmap;
pmr = pm->pmap_phys;
- sched_pin();
- CPU_OR(&pm->pm_active, PCPU_PTR(cpumask));
- sched_unpin();
+ CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
PCPU_SET(curpmap, pmr);
}
@@ -957,9 +955,7 @@ moea_deactivate(mmu_t mmu, struct thread *td)
pmap_t pm;
pm = &td->td_proc->p_vmspace->vm_pmap;
- sched_pin();
- CPU_NAND(&pm->pm_active, PCPU_PTR(cpumask));
- sched_unpin();
+ CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
PCPU_SET(curpmap, NULL);
}
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 8f4028f..f051b61 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -998,9 +998,7 @@ moea64_activate(mmu_t mmu, struct thread *td)
pmap_t pm;
pm = &td->td_proc->p_vmspace->vm_pmap;
- sched_pin();
- CPU_OR(&pm->pm_active, PCPU_PTR(cpumask));
- sched_unpin();
+ CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
#ifdef __powerpc64__
PCPU_SET(userslb, pm->pm_slb);
@@ -1015,9 +1013,7 @@ moea64_deactivate(mmu_t mmu, struct thread *td)
pmap_t pm;
pm = &td->td_proc->p_vmspace->vm_pmap;
- sched_pin();
- CPU_NAND(&pm->pm_active, PCPU_PTR(cpumask));
- sched_unpin();
+ CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
#ifdef __powerpc64__
PCPU_SET(userslb, NULL);
#else
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index e1cd071..18068fc 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -1826,6 +1826,7 @@ static void
mmu_booke_activate(mmu_t mmu, struct thread *td)
{
pmap_t pmap;
+ u_int cpuid;
pmap = &td->td_proc->p_vmspace->vm_pmap;
@@ -1836,14 +1837,15 @@ mmu_booke_activate(mmu_t mmu, struct thread *td)
mtx_lock_spin(&sched_lock);
- CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+ cpuid = PCPU_GET(cpuid);
+ CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
PCPU_SET(curpmap, pmap);
- if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
+ if (pmap->pm_tid[cpuid] == TID_NONE)
tid_alloc(pmap);
/* Load PID0 register with pmap tid value. */
- mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
+ mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
__asm __volatile("isync");
mtx_unlock_spin(&sched_lock);
@@ -1865,9 +1867,7 @@ mmu_booke_deactivate(mmu_t mmu, struct thread *td)
CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
__func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
- sched_pin();
- CPU_NAND_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
- sched_unpin();
+ CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
PCPU_SET(curpmap, NULL);
}
diff --git a/sys/powerpc/powerpc/mp_machdep.c b/sys/powerpc/powerpc/mp_machdep.c
index 3563376..db20a6f 100644
--- a/sys/powerpc/powerpc/mp_machdep.c
+++ b/sys/powerpc/powerpc/mp_machdep.c
@@ -171,9 +171,8 @@ cpu_mp_start(void)
pc->pc_cpuid = bsp.cr_cpuid;
pc->pc_bsp = 1;
}
- CPU_SETOF(pc->pc_cpuid, &pc->pc_cpumask);
pc->pc_hwref = cpu.cr_hwref;
- CPU_OR(&all_cpus, &pc->pc_cpumask);
+ CPU_SET(pc->pc_cpuid, &all_cpus);
next:
error = platform_smp_next_cpu(&cpu);
}
@@ -211,8 +210,6 @@ cpu_mp_unleash(void *dummy)
smp_cpus = 0;
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
cpus++;
- pc->pc_other_cpus = all_cpus;
- CPU_NAND(&pc->pc_other_cpus, &pc->pc_cpumask);
if (!pc->pc_bsp) {
if (bootverbose)
printf("Waking up CPU %d (dev=%x)\n",
@@ -274,7 +271,7 @@ SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
int
powerpc_ipi_handler(void *arg)
{
- cpuset_t self;
+ u_int cpuid;
uint32_t ipimask;
int msg;
@@ -306,14 +303,14 @@ powerpc_ipi_handler(void *arg)
*/
CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
__func__);
- savectx(&stoppcbs[PCPU_GET(cpuid)]);
- self = PCPU_GET(cpumask);
+ cpuid = PCPU_GET(cpuid);
+ savectx(&stoppcbs[cpuid]);
savectx(PCPU_GET(curpcb));
- CPU_OR_ATOMIC(&stopped_cpus, &self);
- while (!CPU_OVERLAP(&started_cpus, &self))
+ CPU_SET_ATOMIC(cpuid, &stopped_cpus);
+ while (!CPU_ISSET(cpuid, &started_cpus))
cpu_spinwait();
- CPU_NAND_ATOMIC(&started_cpus, &self);
- CPU_NAND_ATOMIC(&stopped_cpus, &self);
+ CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
+ CPU_CLR_ATOMIC(cpuid, &started_cpus);
CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
break;
case IPI_HARDCLOCK:
@@ -346,7 +343,7 @@ ipi_selected(cpuset_t cpus, int ipi)
struct pcpu *pc;
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
- if (CPU_OVERLAP(&cpus, &pc->pc_cpumask))
+ if (CPU_ISSET(pc->pc_cpuid, &cpus))
ipi_send(pc, ipi);
}
}
diff --git a/sys/sparc64/sparc64/mp_machdep.c b/sys/sparc64/sparc64/mp_machdep.c
index 1345201..4fc8562 100644
--- a/sys/sparc64/sparc64/mp_machdep.c
+++ b/sys/sparc64/sparc64/mp_machdep.c
@@ -281,7 +281,6 @@ sun4u_startcpu(phandle_t cpu, void *func, u_long arg)
void
cpu_mp_start(void)
{
- cpuset_t ocpus;
mtx_init(&ipi_mtx, "ipi", NULL, MTX_SPIN);
@@ -298,9 +297,6 @@ cpu_mp_start(void)
KASSERT(!isjbus || mp_ncpus <= IDR_JALAPENO_MAX_BN_PAIRS,
("%s: can only IPI a maximum of %d JBus-CPUs",
__func__, IDR_JALAPENO_MAX_BN_PAIRS));
- ocpus = all_cpus;
- CPU_CLR(curcpu, &ocpus);
- PCPU_SET(other_cpus, ocpus);
smp_active = 1;
}
@@ -422,7 +418,6 @@ cpu_mp_unleash(void *v)
void
cpu_mp_bootstrap(struct pcpu *pc)
{
- cpuset_t ocpus;
volatile struct cpu_start_args *csa;
csa = &cpu_start_args;
@@ -466,9 +461,6 @@ cpu_mp_bootstrap(struct pcpu *pc)
smp_cpus++;
KASSERT(curthread != NULL, ("%s: curthread", __func__));
- ocpus = all_cpus;
- CPU_CLR(curcpu, &ocpus);
- PCPU_SET(other_cpus, ocpus);
printf("SMP: AP CPU #%d Launched!\n", curcpu);
csa->csa_count--;
diff --git a/sys/sys/pcpu.h b/sys/sys/pcpu.h
index e6044a7..2e69f81 100644
--- a/sys/sys/pcpu.h
+++ b/sys/sys/pcpu.h
@@ -196,18 +196,6 @@ struct pcpu {
* if only to make kernel debugging easier.
*/
PCPU_MD_FIELDS;
-
- /*
- * XXX
- * For the time being, keep the cpuset_t objects as the very last
- * members of the structure.
- * They are actually tagged to be removed soon, but as long as this
- * does not happen, it is necessary to find a way to implement
- * easilly interfaces to userland and leaving them last makes that
- * possible.
- */
- cpuset_t pc_cpumask; /* This cpu mask */
- cpuset_t pc_other_cpus; /* Mask of all other cpus */
} __aligned(CACHE_LINE_SIZE);
#ifdef _KERNEL