path: root/sys/i386
author		attilio <attilio@FreeBSD.org>	2009-05-14 17:43:00 +0000
committer	attilio <attilio@FreeBSD.org>	2009-05-14 17:43:00 +0000
commit		902219327c608b14c9bcbd63038b9842bae52f7b (patch)
tree		22b20a38e37e187948ac0d924a006065dcd27968 /sys/i386
parent		b8aa665b4a6150092ef1acac35dac70302c2795d (diff)
FreeBSD currently supports at most 32 CPUs on all architectures. With the arrival of 128+ core machines it is necessary to handle more than that. One of the first things to change is cpumask_t, which needs to handle masks wider than 32 bits (which is now starting to happen). Some places, however, still assume that cpumask_t is a 32-bit mask. Fix that situation by always using cpumask_t correctly where needed.

While here, remove the STOP_NMI code from the Xen support, as it is broken in any case, and make ipi_nmi_pending static.

Reviewed by:	jhb, kmacy
Tested by:	Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
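As an illustration of the truncation hazard this change guards against, here is a minimal userland sketch; the 64-bit width of cpumask_t below is an assumption for demonstration only, not the kernel's actual definition:

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical stand-in for a widened cpumask_t; the real kernel
 * type and width are not part of this diff.
 */
typedef uint64_t cpumask_t;

int
main(void)
{
	cpumask_t mask = (cpumask_t)1 << 40;	/* CPU 40 set */
	unsigned int old = mask;		/* the old u_int habit */

	/* The upper 32 bits are silently dropped: prints 0. */
	printf("u_int sees: %u\n", old);
	/* Keeping cpumask_t end to end preserves the bit. */
	printf("cpumask_t sees CPU 40: %d\n", (mask >> 40) & 1 ? 1 : 0);
	return (0);
}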
Diffstat (limited to 'sys/i386')
-rw-r--r--	sys/i386/i386/mp_machdep.c	|  18
-rw-r--r--	sys/i386/i386/pmap.c		|   9
-rw-r--r--	sys/i386/include/smp.h		|   6
-rw-r--r--	sys/i386/xen/mp_machdep.c	|  66
4 files changed, 21 insertions(+), 78 deletions(-)
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 4495a57..c233f25 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -155,9 +155,9 @@ vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;
#ifdef STOP_NMI
-volatile cpumask_t ipi_nmi_pending;
+static volatile cpumask_t ipi_nmi_pending;
-static void ipi_nmi_selected(u_int32_t cpus);
+static void ipi_nmi_selected(cpumask_t cpus);
#endif
#ifdef COUNT_IPIS
@@ -1146,7 +1146,7 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
-smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
+smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
int ncpu, othercpus;
@@ -1231,7 +1231,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
-smp_masked_invltlb(u_int mask)
+smp_masked_invltlb(cpumask_t mask)
{
if (smp_started) {
@@ -1243,7 +1243,7 @@ smp_masked_invltlb(u_int mask)
}
void
-smp_masked_invlpg(u_int mask, vm_offset_t addr)
+smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{
if (smp_started) {
@@ -1255,7 +1255,7 @@ smp_masked_invlpg(u_int mask, vm_offset_t addr)
}
void
-smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
+smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@@ -1303,7 +1303,7 @@ ipi_bitmap_handler(struct trapframe frame)
* send an IPI to a set of cpus.
*/
void
-ipi_selected(u_int32_t cpus, u_int ipi)
+ipi_selected(cpumask_t cpus, u_int ipi)
{
int cpu;
u_int bitmap = 0;
@@ -1367,7 +1367,7 @@ ipi_all_but_self(u_int ipi)
#define BEFORE_SPIN 1000000
void
-ipi_nmi_selected(u_int32_t cpus)
+ipi_nmi_selected(cpumask_t cpus)
{
int cpu;
register_t icrlo;
@@ -1456,7 +1456,7 @@ SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
- u_int mask;
+ cpumask_t mask;
int error;
mask = hlt_cpus_mask;
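The ipi_selected() hunks above change the mask walk to take a cpumask_t. A standalone sketch of that ffs()-based loop follows; the mask value is invented for the demo, and note that ffs(3) only examines an int, so a mask wider than 32 bits would eventually need a different primitive:

#include <stdio.h>
#include <strings.h>	/* ffs() */

typedef unsigned int cpumask_t;	/* 32-bit here purely for the demo */

int
main(void)
{
	cpumask_t cpus = 0x15;	/* CPUs 0, 2 and 4, arbitrary */
	int cpu;

	/* Same shape as the ipi_selected() loop in this diff. */
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;			/* ffs() is 1-based */
		cpus &= ~(1u << cpu);	/* drop the bit we just handled */
		printf("would send IPI to CPU %d\n", cpu);
	}
	return (0);
}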
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 5f016d5..5d7dc97 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1624,7 +1624,7 @@ retry:
* Deal with a SMP shootdown of other users of the pmap that we are
* trying to dispose of. This can be a bit hairy.
*/
-static u_int *lazymask;
+static cpumask_t *lazymask;
static u_int lazyptd;
static volatile u_int lazywait;
@@ -1633,7 +1633,7 @@ void pmap_lazyfix_action(void);
void
pmap_lazyfix_action(void)
{
- u_int mymask = PCPU_GET(cpumask);
+ cpumask_t mymask = PCPU_GET(cpumask);
#ifdef COUNT_IPIS
(*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
@@ -1645,7 +1645,7 @@ pmap_lazyfix_action(void)
}
static void
-pmap_lazyfix_self(u_int mymask)
+pmap_lazyfix_self(cpumask_t mymask)
{
if (rcr3() == lazyptd)
@@ -1657,8 +1657,7 @@ pmap_lazyfix_self(u_int mymask)
static void
pmap_lazyfix(pmap_t pmap)
{
- u_int mymask;
- u_int mask;
+ cpumask_t mymask, mask;
u_int spins;
while ((mask = pmap->pm_active) != 0) {
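The pmap_lazyfix() changes keep the active-CPU mask in cpumask_t while the initiator re-reads pm_active until every responder has cleared its own bit. A loose single-threaded C11 sketch of that handshake, with invented CPU numbers and a userland atomic in place of the pmap field:

#include <stdatomic.h>
#include <stdio.h>

typedef unsigned int cpumask_t;

/* Userland analogue of pmap->pm_active. */
static _Atomic cpumask_t pm_active = 0x6;	/* CPUs 1 and 2, invented */

/* What each responding CPU would do: clear its own bit. */
static void
responder_done(int cpu)
{
	atomic_fetch_and(&pm_active, ~(1u << cpu));
}

int
main(void)
{
	responder_done(1);
	responder_done(2);

	/* Initiator loop, mirroring pmap_lazyfix(): repeat while bits remain. */
	while (atomic_load(&pm_active) != 0)
		;	/* would pick a CPU, send an IPI and spin here */
	printf("mask drained\n");
	return (0);
}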
diff --git a/sys/i386/include/smp.h b/sys/i386/include/smp.h
index 33739cc..917c285 100644
--- a/sys/i386/include/smp.h
+++ b/sys/i386/include/smp.h
@@ -69,12 +69,12 @@ u_int mp_bootaddress(u_int);
int mp_grab_cpu_hlt(void);
void smp_cache_flush(void);
void smp_invlpg(vm_offset_t addr);
-void smp_masked_invlpg(u_int mask, vm_offset_t addr);
+void smp_masked_invlpg(cpumask_t mask, vm_offset_t addr);
void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
-void smp_masked_invlpg_range(u_int mask, vm_offset_t startva,
+void smp_masked_invlpg_range(cpumask_t mask, vm_offset_t startva,
vm_offset_t endva);
void smp_invltlb(void);
-void smp_masked_invltlb(u_int mask);
+void smp_masked_invltlb(cpumask_t mask);
#ifdef STOP_NMI
int ipi_nmi_handler(void);
diff --git a/sys/i386/xen/mp_machdep.c b/sys/i386/xen/mp_machdep.c
index 7af4437..d8e1eea 100644
--- a/sys/i386/xen/mp_machdep.c
+++ b/sys/i386/xen/mp_machdep.c
@@ -993,7 +993,7 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
-smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
+smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
int ncpu, othercpus;
struct _call_data data;
@@ -1072,7 +1072,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
-smp_masked_invltlb(u_int mask)
+smp_masked_invltlb(cpumask_t mask)
{
if (smp_started) {
@@ -1081,7 +1081,7 @@ smp_masked_invltlb(u_int mask)
}
void
-smp_masked_invlpg(u_int mask, vm_offset_t addr)
+smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{
if (smp_started) {
@@ -1090,7 +1090,7 @@ smp_masked_invlpg(u_int mask, vm_offset_t addr)
}
void
-smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
+smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@@ -1102,7 +1102,7 @@ smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
* send an IPI to a set of cpus.
*/
void
-ipi_selected(uint32_t cpus, u_int ipi)
+ipi_selected(cpumask_t cpus, u_int ipi)
{
int cpu;
u_int bitmap = 0;
@@ -1114,12 +1114,6 @@ ipi_selected(uint32_t cpus, u_int ipi)
ipi = IPI_BITMAP_VECTOR;
}
-#ifdef STOP_NMI
- if (ipi == IPI_STOP && stop_cpus_with_nmi) {
- ipi_nmi_selected(cpus);
- return;
- }
-#endif
CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
while ((cpu = ffs(cpus)) != 0) {
cpu--;
@@ -1160,56 +1154,6 @@ ipi_all_but_self(u_int ipi)
ipi_selected(PCPU_GET(other_cpus), ipi);
}
-#ifdef STOP_NMI
-/*
- * send NMI IPI to selected CPUs
- */
-
-#define BEFORE_SPIN 1000000
-
-void
-ipi_nmi_selected(u_int32_t cpus)
-{
- int cpu;
- register_t icrlo;
-
- icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
- | APIC_TRIGMOD_EDGE;
-
- CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
-
- atomic_set_int(&ipi_nmi_pending, cpus);
-
- while ((cpu = ffs(cpus)) != 0) {
- cpu--;
- cpus &= ~(1 << cpu);
-
- KASSERT(cpu_apic_ids[cpu] != -1,
- ("IPI NMI to non-existent CPU %d", cpu));
-
- /* Wait for an earlier IPI to finish. */
- if (!lapic_ipi_wait(BEFORE_SPIN))
- panic("ipi_nmi_selected: previous IPI has not cleared");
-
- lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
- }
-}
-
-int
-ipi_nmi_handler(void)
-{
- int cpumask = PCPU_GET(cpumask);
-
- if (!(ipi_nmi_pending & cpumask))
- return 1;
-
- atomic_clear_int(&ipi_nmi_pending, cpumask);
- cpustop_handler();
- return 0;
-}
-
-#endif /* STOP_NMI */
-
/*
* Handle an IPI_STOP by saving our current context and spinning until we
* are resumed.
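The removed STOP_NMI block implemented a simple pending-mask protocol: the sender sets the target bits in ipi_nmi_pending before raising the NMIs, and each handler checks for, then clears, its own bit, returning 1 for an NMI that was not ours. A minimal C11-atomics sketch of that protocol (userland analogue; names reused from the removed code for clarity only):

#include <stdatomic.h>
#include <stdio.h>

typedef unsigned int cpumask_t;

static _Atomic cpumask_t ipi_nmi_pending;

/* Sender side, per the removed ipi_nmi_selected(): mark targets first. */
static void
mark_pending(cpumask_t cpus)
{
	atomic_fetch_or(&ipi_nmi_pending, cpus);
}

/* Handler side, per the removed ipi_nmi_handler(): 1 = not for us. */
static int
nmi_handler(cpumask_t self)
{
	if (!(atomic_load(&ipi_nmi_pending) & self))
		return (1);
	atomic_fetch_and(&ipi_nmi_pending, ~self);
	return (0);
}

int
main(void)
{
	mark_pending(1u << 3);				/* target CPU 3, arbitrary */
	printf("CPU 2: %d\n", nmi_handler(1u << 2));	/* 1: spurious */
	printf("CPU 3: %d\n", nmi_handler(1u << 3));	/* 0: handled */
	return (0);
}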