author		attilio <attilio@FreeBSD.org>	2011-05-05 14:39:14 +0000
committer	attilio <attilio@FreeBSD.org>	2011-05-05 14:39:14 +0000
commit		fe4de567b50f7ca317b16f69b7b3a7de693025af (patch)
tree		e5d54bcefbf1fe0c4c6804bdc5c4852b1b64518a /sys/amd64/amd64/mp_machdep.c
parent		d3d3db9bac709a7fa4319bf5e8c8fb4e05918772 (diff)
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.

That is going to offer the underlying support for a simple bump of MAXCPU
and then support for a number of CPUs > 32 (as it is today).

Right now, cpumask_t is an int, 32 bits on all our supported architectures.
cpuset_t, on the other hand, is implemented as an array of longs and is
easily extendible by definition.

The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN

while the others are still missing.
Userland is believed to be fully converted with the changes contained here.

Some technical notes:
- This commit may be considered an ABI nop for all the architectures
  different from amd64 and ia64 (and sparc64 in the future).
- Per-CPU members, which are now converted to cpuset_t, need to be accessed
  while avoiding migration, because the size of cpuset_t should be
  considered unknown.
- The size of cpuset_t objects differs between kernel and userland (this is
  primarily done in order to leave some more space in userland to cope with
  KBI extensions). If you need to access a kernel cpuset_t from userland,
  please refer to the examples in this patch on how to do that correctly
  (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon.
- Only MAXCPU for amd64 is bumped now.

The patch has been tested by sbruno and Nicholas Esborn on an Opteron system
with 4 x 12-core packages. More testing on big SMP is expected to come soon.
pluknet tested the patch on his 8-way machines on both amd64 and i386.

Tested by:	pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by:	jeff, jhb, sbruno
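
The conversion is mostly mechanical: integer bit twiddling on cpumask_t
becomes calls to the CPU_*() macros from <sys/cpuset.h>, and any copy of a
per-CPU cpuset_t is bracketed by sched_pin()/sched_unpin() because the
object is no longer a single machine word. The sketch below is not part of
the commit; the function name and values are made up, but the idioms mirror
those used in mp_machdep.c in the diff that follows.

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/pcpu.h>
#include <sys/sched.h>

/*
 * Illustrative sketch only (not from the commit): old cpumask_t idioms
 * and their cpuset_t replacements as used throughout this diff.
 */
static void
cpuset_conversion_example(void)
{
	cpuset_t mask, other;

	CPU_ZERO(&mask);		/* was: mask = 0;          */
	CPU_SET(2, &mask);		/* was: mask |= (1 << 2);  */

	/*
	 * A per-CPU cpuset_t must be copied without migrating between
	 * CPUs, since its size should be considered unknown.
	 */
	sched_pin();
	other = PCPU_GET(cpumask);	/* copy of the per-CPU mask */
	sched_unpin();

	CPU_NAND(&mask, &other);	/* was: mask &= ~other;    */
	CPU_OR(&mask, &other);		/* was: mask |= other;     */

	if (CPU_OVERLAP(&mask, &other))	/* was: (mask & other) != 0 */
		printf("sets intersect\n");
	if (CPU_EMPTY(&mask))		/* was: mask == 0          */
		printf("set is empty\n");
}
```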
Diffstat (limited to 'sys/amd64/amd64/mp_machdep.c')
-rw-r--r--	sys/amd64/amd64/mp_machdep.c	224
1 file changed, 121 insertions(+), 103 deletions(-)
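
One recurring pattern in the diff below is iterating over the members of a
cpuset_t: the old `while ((cpu = ffs(cpus)) != 0)` loop over an int becomes
cpusetobj_ffs() plus CPU_CLR(), as in the reworked ipi_selected() and
smp_targeted_tlb_shootdown(). A minimal sketch of that idiom (illustrative
only; visit_cpu is a hypothetical callback, not a kernel interface):

```c
#include <sys/param.h>
#include <sys/cpuset.h>

/*
 * Sketch of the set-iteration idiom used by ipi_selected() and
 * smp_targeted_tlb_shootdown() in the diff below.
 */
static void
for_each_cpu_in_set(cpuset_t set, void (*visit_cpu)(int))
{
	int cpu;

	while ((cpu = cpusetobj_ffs(&set)) != 0) {
		cpu--;			/* cpusetobj_ffs() is 1-based */
		CPU_CLR(cpu, &set);	/* consume this member */
		visit_cpu(cpu);
	}
}
```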
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 8e8dc67..57e91a8 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
+#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
@@ -125,7 +126,7 @@ extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
* Local data and functions.
*/
-static volatile cpumask_t ipi_nmi_pending;
+static volatile cpuset_t ipi_nmi_pending;
/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;
@@ -161,7 +162,7 @@ static void release_aps(void *dummy);
static int hlt_logical_cpus;
static u_int hyperthreading_cpus; /* logical cpus sharing L1 cache */
-static cpumask_t hyperthreading_cpus_mask;
+static cpuset_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;
@@ -311,7 +312,7 @@ topo_probe(void)
if (cpu_topo_probed)
return;
- logical_cpus_mask = 0;
+ CPU_ZERO(&logical_cpus_mask);
if (mp_ncpus <= 1)
cpu_cores = cpu_logical = 1;
else if (cpu_vendor_id == CPU_VENDOR_AMD)
@@ -455,7 +456,7 @@ cpu_mp_probe(void)
* Always record BSP in CPU map so that the mbuf init code works
* correctly.
*/
- all_cpus = 1;
+ CPU_SETOF(0, &all_cpus);
if (mp_ncpus == 0) {
/*
* No CPUs were found, so this must be a UP system. Setup
@@ -582,6 +583,7 @@ cpu_mp_announce(void)
void
init_secondary(void)
{
+ cpuset_t tcpuset, tallcpus;
struct pcpu *pc;
struct nmi_pcpu *np;
u_int64_t msr, cr0;
@@ -713,19 +715,22 @@ init_secondary(void)
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
+ tcpuset = PCPU_GET(cpumask);
/* Determine if we are a logical CPU. */
/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
- logical_cpus_mask |= PCPU_GET(cpumask);
-
+ CPU_OR(&logical_cpus_mask, &tcpuset);
+
/* Determine if we are a hyperthread. */
if (hyperthreading_cpus > 1 &&
PCPU_GET(apic_id) % hyperthreading_cpus != 0)
- hyperthreading_cpus_mask |= PCPU_GET(cpumask);
+ CPU_OR(&hyperthreading_cpus_mask, &tcpuset);
/* Build our map of 'other' CPUs. */
- PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
+ tallcpus = all_cpus;
+ CPU_NAND(&tallcpus, &tcpuset);
+ PCPU_SET(other_cpus, tallcpus);
if (bootverbose)
lapic_dump("AP");
@@ -868,6 +873,7 @@ assign_cpu_ids(void)
static int
start_all_aps(void)
{
+ cpuset_t tallcpus, tcpuset;
vm_offset_t va = boot_address + KERNBASE;
u_int64_t *pt4, *pt3, *pt2;
u_int32_t mpbioswarmvec;
@@ -932,11 +938,14 @@ start_all_aps(void)
panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
}
- all_cpus |= (1 << cpu); /* record AP in CPU map */
+ CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
}
/* build our map of 'other' CPUs */
- PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
+ tallcpus = all_cpus;
+ tcpuset = PCPU_GET(cpumask);
+ CPU_NAND(&tallcpus, &tcpuset);
+ PCPU_SET(other_cpus, tallcpus);
/* restore the warmstart vector */
*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
@@ -1065,6 +1074,30 @@ SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
#endif /* COUNT_XINVLTLB_HITS */
/*
+ * Send an IPI to specified CPU handling the bitmap logic.
+ */
+static void
+ipi_send_cpu(int cpu, u_int ipi)
+{
+ u_int bitmap, old_pending, new_pending;
+
+ KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
+
+ if (IPI_IS_BITMAPED(ipi)) {
+ bitmap = 1 << ipi;
+ ipi = IPI_BITMAP_VECTOR;
+ do {
+ old_pending = cpu_ipi_pending[cpu];
+ new_pending = old_pending | bitmap;
+ } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+ old_pending, new_pending));
+ if (old_pending)
+ return;
+ }
+ lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
+}
+
+/*
* Flush the TLB on all other CPU's
*/
static void
@@ -1088,28 +1121,19 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
-smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
+smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
- int ncpu, othercpus;
+ int cpu, ncpu, othercpus;
othercpus = mp_ncpus - 1;
- if (mask == (cpumask_t)-1) {
- ncpu = othercpus;
- if (ncpu < 1)
+ if (CPU_ISFULLSET(&mask)) {
+ if (othercpus < 1)
return;
} else {
- mask &= ~PCPU_GET(cpumask);
- if (mask == 0)
- return;
- ncpu = bitcount32(mask);
- if (ncpu > othercpus) {
- /* XXX this should be a panic offence */
- printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
- ncpu, othercpus);
- ncpu = othercpus;
- }
- /* XXX should be a panic, implied by mask == 0 above */
- if (ncpu < 1)
+ sched_pin();
+ CPU_NAND(&mask, PCPU_PTR(cpumask));
+ sched_unpin();
+ if (CPU_EMPTY(&mask))
return;
}
if (!(read_rflags() & PSL_I))
@@ -1118,39 +1142,25 @@ smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_o
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
- if (mask == (cpumask_t)-1)
+ if (CPU_ISFULLSET(&mask)) {
+ ncpu = othercpus;
ipi_all_but_self(vector);
- else
- ipi_selected(mask, vector);
+ } else {
+ ncpu = 0;
+ while ((cpu = cpusetobj_ffs(&mask)) != 0) {
+ cpu--;
+ CPU_CLR(cpu, &mask);
+ CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
+ cpu, vector);
+ ipi_send_cpu(cpu, vector);
+ ncpu++;
+ }
+ }
while (smp_tlb_wait < ncpu)
ia32_pause();
mtx_unlock_spin(&smp_ipi_mtx);
}
-/*
- * Send an IPI to specified CPU handling the bitmap logic.
- */
-static void
-ipi_send_cpu(int cpu, u_int ipi)
-{
- u_int bitmap, old_pending, new_pending;
-
- KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
-
- if (IPI_IS_BITMAPED(ipi)) {
- bitmap = 1 << ipi;
- ipi = IPI_BITMAP_VECTOR;
- do {
- old_pending = cpu_ipi_pending[cpu];
- new_pending = old_pending | bitmap;
- } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
- old_pending, new_pending));
- if (old_pending)
- return;
- }
- lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
-}
-
void
smp_cache_flush(void)
{
@@ -1197,7 +1207,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
-smp_masked_invltlb(cpumask_t mask)
+smp_masked_invltlb(cpuset_t mask)
{
if (smp_started) {
@@ -1209,7 +1219,7 @@ smp_masked_invltlb(cpumask_t mask)
}
void
-smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
+smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
{
if (smp_started) {
@@ -1221,7 +1231,7 @@ smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
}
void
-smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
+smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@@ -1274,7 +1284,7 @@ ipi_bitmap_handler(struct trapframe frame)
* send an IPI to a set of cpus.
*/
void
-ipi_selected(cpumask_t cpus, u_int ipi)
+ipi_selected(cpuset_t cpus, u_int ipi)
{
int cpu;
@@ -1284,12 +1294,12 @@ ipi_selected(cpumask_t cpus, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, cpus);
+ CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);
- CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
- while ((cpu = ffs(cpus)) != 0) {
+ while ((cpu = cpusetobj_ffs(&cpus)) != 0) {
cpu--;
- cpus &= ~(1 << cpu);
+ CPU_CLR(cpu, &cpus);
+ CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
}
}
@@ -1307,7 +1317,7 @@ ipi_cpu(int cpu, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+ CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
@@ -1320,8 +1330,10 @@ void
ipi_all_but_self(u_int ipi)
{
+ sched_pin();
if (IPI_IS_BITMAPED(ipi)) {
ipi_selected(PCPU_GET(other_cpus), ipi);
+ sched_unpin();
return;
}
@@ -1331,7 +1343,8 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
+ CPU_OR_ATOMIC(&ipi_nmi_pending, PCPU_PTR(other_cpus));
+ sched_unpin();
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@@ -1340,7 +1353,7 @@ ipi_all_but_self(u_int ipi)
int
ipi_nmi_handler()
{
- cpumask_t cpumask;
+ cpuset_t cpumask;
/*
* As long as there is not a simple way to know about a NMI's
@@ -1348,11 +1361,13 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
+ sched_pin();
cpumask = PCPU_GET(cpumask);
- if ((ipi_nmi_pending & cpumask) == 0)
+ sched_unpin();
+ if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
return (1);
- atomic_clear_int(&ipi_nmi_pending, cpumask);
+ CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
cpustop_handler();
return (0);
}
@@ -1364,23 +1379,25 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
- cpumask_t cpumask;
+ cpuset_t cpumask;
u_int cpu;
+ sched_pin();
cpu = PCPU_GET(cpuid);
cpumask = PCPU_GET(cpumask);
+ sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- atomic_set_int(&stopped_cpus, cpumask);
+ CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
/* Wait for restart */
- while (!(started_cpus & cpumask))
+ while (!CPU_OVERLAP(&started_cpus, &cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ CPU_NAND_ATOMIC(&started_cpus, &cpumask);
+ CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
@@ -1395,7 +1412,7 @@ cpustop_handler(void)
void
cpususpend_handler(void)
{
- cpumask_t cpumask;
+ cpuset_t cpumask;
register_t cr3, rf;
u_int cpu;
@@ -1407,7 +1424,7 @@ cpususpend_handler(void)
if (savectx(susppcbs[cpu])) {
wbinvd();
- atomic_set_int(&stopped_cpus, cpumask);
+ CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
} else {
pmap_init_pat();
PCPU_SET(switchtime, 0);
@@ -1415,11 +1432,11 @@ cpususpend_handler(void)
}
/* Wait for resume */
- while (!(started_cpus & cpumask))
+ while (!CPU_OVERLAP(&started_cpus, &cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ CPU_NAND_ATOMIC(&started_cpus, &cpumask);
+ CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
/* Restore CR3 and enable interrupts */
load_cr3(cr3);
@@ -1447,30 +1464,30 @@ SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
- cpumask_t mask;
+ cpuset_t mask;
int error;
mask = hlt_cpus_mask;
- error = sysctl_handle_int(oidp, &mask, 0, req);
+ error = sysctl_handle_opaque(oidp, &mask, sizeof(mask), req);
if (error || !req->newptr)
return (error);
- if (logical_cpus_mask != 0 &&
- (mask & logical_cpus_mask) == logical_cpus_mask)
+ if (!CPU_EMPTY(&logical_cpus_mask) &&
+ CPU_SUBSET(&mask, &logical_cpus_mask))
hlt_logical_cpus = 1;
else
hlt_logical_cpus = 0;
if (! hyperthreading_allowed)
- mask |= hyperthreading_cpus_mask;
+ CPU_OR(&mask, &hyperthreading_cpus_mask);
- if ((mask & all_cpus) == all_cpus)
- mask &= ~(1<<0);
+ if (CPU_SUBSET(&mask, &all_cpus))
+ CPU_CLR(0, &mask);
hlt_cpus_mask = mask;
return (error);
}
-SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
- 0, 0, sysctl_hlt_cpus, "IU",
+SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus,
+ CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, sysctl_hlt_cpus, "S",
"Bitmap of CPUs to halt. 101 (binary) will halt CPUs 0 and 2.");
static int
@@ -1484,15 +1501,15 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
return (error);
if (disable)
- hlt_cpus_mask |= logical_cpus_mask;
+ CPU_OR(&hlt_cpus_mask, &logical_cpus_mask);
else
- hlt_cpus_mask &= ~logical_cpus_mask;
+ CPU_NAND(&hlt_cpus_mask, &logical_cpus_mask);
if (! hyperthreading_allowed)
- hlt_cpus_mask |= hyperthreading_cpus_mask;
+ CPU_OR(&hlt_cpus_mask, &hyperthreading_cpus_mask);
- if ((hlt_cpus_mask & all_cpus) == all_cpus)
- hlt_cpus_mask &= ~(1<<0);
+ if (CPU_SUBSET(&hlt_cpus_mask, &all_cpus))
+ CPU_CLR(0, &hlt_cpus_mask);
hlt_logical_cpus = disable;
return (error);
@@ -1519,18 +1536,18 @@ sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
#endif
if (allowed)
- hlt_cpus_mask &= ~hyperthreading_cpus_mask;
+ CPU_NAND(&hlt_cpus_mask, &hyperthreading_cpus_mask);
else
- hlt_cpus_mask |= hyperthreading_cpus_mask;
+ CPU_OR(&hlt_cpus_mask, &hyperthreading_cpus_mask);
- if (logical_cpus_mask != 0 &&
- (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
+ if (!CPU_EMPTY(&logical_cpus_mask) &&
+ CPU_SUBSET(&hlt_cpus_mask, &logical_cpus_mask))
hlt_logical_cpus = 1;
else
hlt_logical_cpus = 0;
- if ((hlt_cpus_mask & all_cpus) == all_cpus)
- hlt_cpus_mask &= ~(1<<0);
+ if (CPU_SUBSET(&hlt_cpus_mask, &all_cpus))
+ CPU_CLR(0, &hlt_cpus_mask);
hyperthreading_allowed = allowed;
return (error);
@@ -1540,7 +1557,7 @@ static void
cpu_hlt_setup(void *dummy __unused)
{
- if (logical_cpus_mask != 0) {
+ if (!CPU_EMPTY(&logical_cpus_mask)) {
TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
&hlt_logical_cpus);
sysctl_ctx_init(&logical_cpu_clist);
@@ -1554,20 +1571,21 @@ cpu_hlt_setup(void *dummy __unused)
&logical_cpus_mask, 0, "");
if (hlt_logical_cpus)
- hlt_cpus_mask |= logical_cpus_mask;
+ CPU_OR(&hlt_cpus_mask, &logical_cpus_mask);
/*
* If necessary for security purposes, force
* hyperthreading off, regardless of the value
* of hlt_logical_cpus.
*/
- if (hyperthreading_cpus_mask) {
+ if (!CPU_EMPTY(&hyperthreading_cpus_mask)) {
SYSCTL_ADD_PROC(&logical_cpu_clist,
SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
"hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
0, 0, sysctl_hyperthreading_allowed, "IU", "");
if (! hyperthreading_allowed)
- hlt_cpus_mask |= hyperthreading_cpus_mask;
+ CPU_OR(&hlt_cpus_mask,
+ &hyperthreading_cpus_mask);
}
}
}
@@ -1576,7 +1594,7 @@ SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
int
mp_grab_cpu_hlt(void)
{
- cpumask_t mask;
+ cpuset_t mask;
#ifdef MP_WATCHDOG
u_int cpuid;
#endif
@@ -1589,7 +1607,7 @@ mp_grab_cpu_hlt(void)
#endif
retval = 0;
- while (mask & hlt_cpus_mask) {
+ while (CPU_OVERLAP(&mask, &hlt_cpus_mask)) {
retval = 1;
__asm __volatile("sti; hlt" : : : "memory");
}