Diffstat (limited to 'sys/i386/xen/mp_machdep.c')
-rw-r--r--  sys/i386/xen/mp_machdep.c | 117 ++++++++++++++++++++++-----------------
1 file changed, 69 insertions(+), 48 deletions(-)
diff --git a/sys/i386/xen/mp_machdep.c b/sys/i386/xen/mp_machdep.c
index 2919570..2d05596 100644
--- a/sys/i386/xen/mp_machdep.c
+++ b/sys/i386/xen/mp_machdep.c
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h> /* cngetc() */
+#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
@@ -116,7 +117,7 @@ volatile int smp_tlb_wait;
typedef void call_data_func_t(uintptr_t , uintptr_t);
static u_int logical_cpus;
-static volatile cpumask_t ipi_nmi_pending;
+static volatile cpuset_t ipi_nmi_pending;
/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;
@@ -149,7 +150,7 @@ static int start_ap(int apic_id);
static void release_aps(void *dummy);
static u_int hyperthreading_cpus;
-static cpumask_t hyperthreading_cpus_mask;
+static cpuset_t hyperthreading_cpus_mask;
extern void Xhypervisor_callback(void);
extern void failsafe_callback(void);
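Aside: cpuset_t (from the newly included <sys/cpuset.h>) is a fixed-size bit set implemented as a structure rather than a plain integer, so the old bitwise operators on cpumask_t have no direct equivalent and every use must go through an accessor macro. A quick reference for the conversions this diff applies; the wrapper function is just an illustrative container, not code from the tree:

static void
cpuset_idioms(cpuset_t *set, cpuset_t *other, int cpu)
{

	CPU_ZERO(set);			/* was: mask = 0 */
	CPU_SETOF(cpu, set);		/* was: mask = 1 << cpu (only this bit) */
	CPU_SET(cpu, set);		/* was: mask |= 1 << cpu */
	CPU_CLR(cpu, set);		/* was: mask &= ~(1 << cpu) */
	CPU_OR(set, other);		/* was: mask |= other */
	CPU_NAND(set, other);		/* was: mask &= ~other */
	if (CPU_EMPTY(set))		/* was: if (mask == 0) */
		return;
	if (CPU_OVERLAP(set, other))	/* was: if (mask & other) */
		return;
}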
@@ -239,7 +240,7 @@ cpu_mp_probe(void)
* Always record BSP in CPU map so that the mbuf init code works
* correctly.
*/
- all_cpus = 1;
+ CPU_SETOF(0, &all_cpus);
if (mp_ncpus == 0) {
/*
* No CPUs were found, so this must be a UP system. Setup
@@ -293,7 +294,8 @@ cpu_mp_start(void)
start_all_aps();
/* Setup the initial logical CPUs info. */
- logical_cpus = logical_cpus_mask = 0;
+ logical_cpus = 0;
+ CPU_ZERO(&logical_cpus_mask);
if (cpu_feature & CPUID_HTT)
logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
@@ -521,6 +523,7 @@ xen_smp_intr_init_cpus(void *unused)
void
init_secondary(void)
{
+ cpuset_t tcpuset, tallcpus;
vm_offset_t addr;
int gsel_tss;
@@ -600,18 +603,21 @@ init_secondary(void)
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
+ tcpuset = PCPU_GET(cpumask);
/* Determine if we are a logical CPU. */
if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
- logical_cpus_mask |= PCPU_GET(cpumask);
+ CPU_OR(&logical_cpus_mask, &tcpuset);
/* Determine if we are a hyperthread. */
if (hyperthreading_cpus > 1 &&
PCPU_GET(apic_id) % hyperthreading_cpus != 0)
- hyperthreading_cpus_mask |= PCPU_GET(cpumask);
+ CPU_OR(&hyperthreading_cpus_mask, &tcpuset);
/* Build our map of 'other' CPUs. */
- PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
+ tallcpus = all_cpus;
+ CPU_NAND(&tallcpus, &tcpuset);
+ PCPU_SET(other_cpus, tallcpus);
#if 0
if (bootverbose)
lapic_dump("AP");
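Note the three-step replacement for the old one-liner "all_cpus & ~PCPU_GET(cpumask)": since a cpuset_t cannot appear in a bitwise expression, the code copies all_cpus into a temporary and strips the local bits with CPU_NAND(). Conceptually, CPU_NAND(&dst, &src) performs dst &= ~src one long word at a time; a sketch of those semantics, not the real macro body (the __bits field and _NCPUWORDS constant follow my reading of the bitset implementation, so treat the exact names as assumptions):

static void
cpu_nand_sketch(cpuset_t *dst, const cpuset_t *src)
{
	int i;

	for (i = 0; i < _NCPUWORDS; i++)
		dst->__bits[i] &= ~src->__bits[i];
}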
@@ -725,6 +731,7 @@ assign_cpu_ids(void)
int
start_all_aps(void)
{
+ cpuset_t tallcpus;
int x,apic_id, cpu;
struct pcpu *pc;
@@ -778,12 +785,14 @@ start_all_aps(void)
panic("bye-bye");
}
- all_cpus |= (1 << cpu); /* record AP in CPU map */
+ CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
}
/* build our map of 'other' CPUs */
- PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
+ tallcpus = all_cpus;
+ CPU_NAND(&tallcpus, PCPU_PTR(cpumask));
+ PCPU_SET(other_cpus, tallcpus);
pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
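The same "other CPUs" computation appears here, but with PCPU_PTR(cpumask) handing CPU_NAND() a pointer straight into the per-CPU area instead of the snapshot copy used in init_secondary() above. Presumably the in-place form is acceptable here because start_all_aps() runs on the BSP during bring-up, where migration is not a concern; both spellings from this diff side by side:

	cpuset_t tmp;

	/* init_secondary(): snapshot the per-CPU mask, then mask with it */
	tmp = PCPU_GET(cpumask);
	CPU_NAND(&tallcpus, &tmp);

	/* start_all_aps(): operate on the per-CPU mask in place, no copy */
	CPU_NAND(&tallcpus, PCPU_PTR(cpumask));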
@@ -1012,29 +1021,20 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
-smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
+smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
- int ncpu, othercpus;
+ int cpu, ncpu, othercpus;
struct _call_data data;
othercpus = mp_ncpus - 1;
- if (mask == (u_int)-1) {
- ncpu = othercpus;
- if (ncpu < 1)
+ if (CPU_ISFULLSET(&mask)) {
+ if (othercpus < 1)
return;
} else {
- mask &= ~PCPU_GET(cpumask);
- if (mask == 0)
- return;
- ncpu = bitcount32(mask);
- if (ncpu > othercpus) {
- /* XXX this should be a panic offence */
- printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
- ncpu, othercpus);
- ncpu = othercpus;
- }
- /* XXX should be a panic, implied by mask == 0 above */
- if (ncpu < 1)
+ critical_enter();
+ CPU_NAND(&mask, PCPU_PTR(cpumask));
+ critical_exit();
+ if (CPU_EMPTY(&mask))
return;
}
if (!(read_eflags() & PSL_I))
@@ -1046,10 +1046,20 @@ smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
call_data->arg1 = addr1;
call_data->arg2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
- if (mask == (u_int)-1)
+ if (CPU_ISFULLSET(&mask)) {
+ ncpu = othercpus;
ipi_all_but_self(vector);
- else
- ipi_selected(mask, vector);
+ } else {
+ ncpu = 0;
+ while ((cpu = cpusetobj_ffs(&mask)) != 0) {
+ cpu--;
+ CPU_CLR(cpu, &mask);
+ CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
+ vector);
+ ipi_send_cpu(cpu, vector);
+ ncpu++;
+ }
+ }
while (smp_tlb_wait < ncpu)
ia32_pause();
call_data = NULL;
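cpusetobj_ffs() extends ffs(3) to a cpuset_t: it returns the 1-based index of the lowest set bit, or 0 when the set is empty, which is why the loop above decrements cpu and clears the bit before sending each IPI. A minimal sketch of the same drain pattern, here just counting the members of a set passed by value (illustrative only, not code from the tree):

static int
cpuset_count_sketch(cpuset_t mask)
{
	int cpu, ncpu;

	ncpu = 0;
	while ((cpu = cpusetobj_ffs(&mask)) != 0) {
		cpu--;				/* ffs indices are 1-based */
		CPU_CLR(cpu, &mask);		/* consume this member */
		ncpu++;
	}
	return (ncpu);
}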
@@ -1092,7 +1102,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
-smp_masked_invltlb(cpumask_t mask)
+smp_masked_invltlb(cpuset_t mask)
{
if (smp_started) {
@@ -1101,7 +1111,7 @@ smp_masked_invltlb(cpumask_t mask)
}
void
-smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
+smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
{
if (smp_started) {
@@ -1110,7 +1120,7 @@ smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
}
void
-smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
+smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@@ -1122,7 +1132,7 @@ smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
* send an IPI to a set of cpus.
*/
void
-ipi_selected(cpumask_t cpus, u_int ipi)
+ipi_selected(cpuset_t cpus, u_int ipi)
{
int cpu;
@@ -1132,11 +1142,11 @@ ipi_selected(cpumask_t cpus, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, cpus);
+ CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);
- while ((cpu = ffs(cpus)) != 0) {
+ while ((cpu = cpusetobj_ffs(&cpus)) != 0) {
cpu--;
- cpus &= ~(1 << cpu);
+ CPU_CLR(cpu, &cpus);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
}
@@ -1155,7 +1165,7 @@ ipi_cpu(int cpu, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+ CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
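The *_ATOMIC macro variants take over from atomic_set_int()/atomic_clear_int() on the shared ipi_nmi_pending mask. Atomicity is per long word rather than across the whole set, which is presumably sufficient for this pending-bit protocol, where each receiver only tests and clears its own bit. The three conversions side by side:

	/* was: atomic_set_int(&ipi_nmi_pending, cpus); */
	CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);

	/* was: atomic_set_int(&ipi_nmi_pending, 1 << cpu); */
	CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);

	/* was: atomic_clear_int(&ipi_nmi_pending, cpumask); */
	CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);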
@@ -1167,23 +1177,27 @@ ipi_cpu(int cpu, u_int ipi)
void
ipi_all_but_self(u_int ipi)
{
+ cpuset_t other_cpus;
/*
* IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
* of help in order to understand what is the source.
* Set the mask of receiving CPUs for this purpose.
*/
+ sched_pin();
+ other_cpus = PCPU_GET(other_cpus);
+ sched_unpin();
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
+ CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
- ipi_selected(PCPU_GET(other_cpus), ipi);
+ ipi_selected(other_cpus, ipi);
}
int
ipi_nmi_handler()
{
- cpumask_t cpumask;
+ cpuset_t cpumask;
/*
* As long as there is not a simple way to know about a NMI's
@@ -1191,11 +1205,13 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
+ sched_pin();
cpumask = PCPU_GET(cpumask);
- if ((ipi_nmi_pending & cpumask) == 0)
+ sched_unpin();
+ if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
return (1);
- atomic_clear_int(&ipi_nmi_pending, cpumask);
+ CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
cpustop_handler();
return (0);
}
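The sched_pin()/sched_unpin() pairs added in ipi_all_but_self() and here exist because PCPU_GET() of a cpuset_t copies a multi-word structure out of the per-CPU area: the old single-word cpumask_t read was atomic by itself, but an unpinned thread could now migrate mid-copy and end up with words read from two different CPUs' pcpu regions. The recurring snapshot pattern (the shootdown path earlier uses critical_enter()/critical_exit() to the same end):

	cpuset_t snap;

	sched_pin();			/* no migration while we copy */
	snap = PCPU_GET(cpumask);	/* multi-word structure copy */
	sched_unpin();
	/* snap now consistently describes a single CPU */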
@@ -1207,20 +1223,25 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
- int cpu = PCPU_GET(cpuid);
- int cpumask = PCPU_GET(cpumask);
+ cpuset_t cpumask;
+ int cpu;
+
+ sched_pin();
+ cpumask = PCPU_GET(cpumask);
+ cpu = PCPU_GET(cpuid);
+ sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- atomic_set_int(&stopped_cpus, cpumask);
+ CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
/* Wait for restart */
- while (!(started_cpus & cpumask))
+ while (!CPU_OVERLAP(&started_cpus, &cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ CPU_NAND_ATOMIC(&started_cpus, &cpumask);
+ CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();