summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r-- sys/amd64/amd64/mp_machdep.c 25
-rw-r--r-- sys/i386/i386/mp_machdep.c 24
-rw-r--r-- sys/kern/subr_kdb.c 21
3 files changed, 30 insertions, 40 deletions
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 9d771ad..286dae9 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -902,7 +902,6 @@ smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
}
}
-
void
ipi_bitmap_handler(struct clockframe frame)
{
@@ -1011,7 +1010,6 @@ ipi_self(u_int ipi)
void
ipi_nmi_selected(u_int32_t cpus)
{
-
int cpu;
register_t icrlo;
@@ -1020,10 +1018,8 @@ ipi_nmi_selected(u_int32_t cpus)
CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
-
atomic_set_int(&ipi_nmi_pending, cpus);
-
while ((cpu = ffs(cpus)) != 0) {
cpu--;
cpus &= ~(1 << cpu);
@@ -1035,35 +1031,34 @@ ipi_nmi_selected(u_int32_t cpus)
if (!lapic_ipi_wait(BEFORE_SPIN))
panic("ipi_nmi_selected: previous IPI has not cleared");
- lapic_ipi_raw(icrlo,cpu_apic_ids[cpu]);
+ lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
}
}
-
int
ipi_nmi_handler()
{
- int cpu = PCPU_GET(cpuid);
+ int cpu = PCPU_GET(cpuid);
+ int cpumask = PCPU_GET(cpumask);
- if(!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
+ if (!(atomic_load_acq_int(&ipi_nmi_pending) & cpumask))
return 1;
- atomic_clear_int(&ipi_nmi_pending,1 << cpu);
+ atomic_clear_int(&ipi_nmi_pending, cpumask);
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- atomic_set_int(&stopped_cpus,1 << cpu);
-
+ atomic_set_int(&stopped_cpus, cpumask);
/* Wait for restart */
- while(!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
+ while (!(atomic_load_acq_int(&started_cpus) & cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus,1 << cpu);
- atomic_clear_int(&stopped_cpus,1 << cpu);
+ atomic_clear_int(&started_cpus, cpumask);
+ atomic_clear_int(&stopped_cpus, cpumask);
- if(cpu == 0 && cpustop_restartfunc != NULL)
+ if (cpu == 0 && cpustop_restartfunc != NULL)
cpustop_restartfunc();
return 0;
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index aeb79bd..90ded0c 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -1132,7 +1132,6 @@ smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
}
}
-
void
ipi_bitmap_handler(struct clockframe frame)
{
@@ -1249,7 +1248,6 @@ ipi_self(u_int ipi)
void
ipi_nmi_selected(u_int32_t cpus)
{
-
int cpu;
register_t icrlo;
@@ -1258,10 +1256,8 @@ ipi_nmi_selected(u_int32_t cpus)
CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
-
atomic_set_int(&ipi_nmi_pending, cpus);
-
while ((cpu = ffs(cpus)) != 0) {
cpu--;
cpus &= ~(1 << cpu);
@@ -1273,7 +1269,7 @@ ipi_nmi_selected(u_int32_t cpus)
if (!lapic_ipi_wait(BEFORE_SPIN))
panic("ipi_nmi_selected: previous IPI has not cleared");
- lapic_ipi_raw(icrlo,cpu_apic_ids[cpu]);
+ lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
}
}
@@ -1281,27 +1277,27 @@ ipi_nmi_selected(u_int32_t cpus)
int
ipi_nmi_handler()
{
- int cpu = PCPU_GET(cpuid);
+ int cpu = PCPU_GET(cpuid);
+ int cpumask = PCPU_GET(cpumask);
- if(!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
+ if (!(atomic_load_acq_int(&ipi_nmi_pending) & cpumask))
return 1;
- atomic_clear_int(&ipi_nmi_pending,1 << cpu);
+ atomic_clear_int(&ipi_nmi_pending, cpumask);
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- atomic_set_int(&stopped_cpus,1 << cpu);
-
+ atomic_set_int(&stopped_cpus, cpumask);
/* Wait for restart */
- while(!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
+ while (!(atomic_load_acq_int(&started_cpus) & cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus,1 << cpu);
- atomic_clear_int(&stopped_cpus,1 << cpu);
+ atomic_clear_int(&started_cpus, cpumask);
+ atomic_clear_int(&stopped_cpus, cpumask);
- if(cpu == 0 && cpustop_restartfunc != NULL)
+ if (cpu == 0 && cpustop_restartfunc != NULL)
cpustop_restartfunc();
return 0;
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 0216df0..c4f14d8 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -337,19 +337,18 @@ struct pcb *
kdb_thr_ctx(struct thread *thr)
#ifdef KDB_STOP_NMI
{
- u_int cpuid;
- struct pcpu *pc;
+ struct pcpu *pc;
+ u_int cpuid;
- if (thr == curthread)
- return &kdb_pcb;
+ if (thr == curthread)
+ return (&kdb_pcb);
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
- cpuid = pc->pc_cpuid;
- if (pc->pc_curthread == thr && (atomic_load_acq_int(&stopped_cpus) & (1 << cpuid)))
- return &stoppcbs[cpuid];
- }
-
- return thr->td_pcb;
+ SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ cpuid = pc->pc_cpuid;
+ if (pc->pc_curthread == thr && (atomic_load_acq_int(&stopped_cpus) & (1 << cpuid)))
+ return (&stoppcbs[cpuid]);
+ }
+ return (thr->td_pcb);
}
#else
{
OpenPOWER on IntegriCloud