author     dwhite <dwhite@FreeBSD.org>   2005-04-30 20:01:00 +0000
committer  dwhite <dwhite@FreeBSD.org>   2005-04-30 20:01:00 +0000
commit     c8fa8099674d921d50dae72e3a58ef66ac9c7fbc (patch)
tree       b1d1d16a2806520655c3bf213640cc3b5593de38
parent     ebe4b8304d164a842cc96bcc863c9321a37f57ff (diff)
Implement an alternate method to stop CPUs when entering DDB. Normally we use
a regular IPI vector, but this vector is blocked when interrupts are disabled.
With "options KDB_STOP_NMI" and debug.kdb.stop_cpus_with_nmi set, KDB will
send an NMI to each CPU instead.

The code also has a context-stuffing feature which helps ddb extract the
state of processes running on the stopped CPUs.

KDB_STOP_NMI is only useful with SMP and complains if SMP is not defined.
This feature only applies to i386 and amd64 at the moment, but could be used
on other architectures with the appropriate MD bits.

Submitted by:	ups
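
The stop/restart handshake this commit introduces is easiest to see outside
the kernel. The sketch below is a hypothetical userspace illustration, not
part of the patch: the pthread scaffolding, fake_nmi_handler(), and the CPU
count are invented for the example, and only the bitmask protocol mirrors
what ipi_nmi_selected(), ipi_nmi_handler(), and stop_cpus_nmi() do in the
diff. The "debugger" thread publishes a pending mask and waits for every
targeted CPU to mark itself stopped; each "CPU" thread notices its bit,
acknowledges, and spins until it is restarted.

	#include <pthread.h>
	#include <sched.h>
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NCPU	4

	static atomic_uint ipi_nmi_pending;	/* CPUs that still owe a stop */
	static atomic_uint stopped_cpus;	/* CPUs parked in the handler */
	static atomic_uint started_cpus;	/* CPUs released by the debugger */

	/* Stand-in for ipi_nmi_handler(): what each stopped CPU does. */
	static void *
	fake_nmi_handler(void *arg)
	{
		unsigned int cpu = (unsigned int)(uintptr_t)arg, bit = 1u << cpu;

		/* Wait until the "debugger" asks this CPU to stop. */
		while (!(atomic_load(&ipi_nmi_pending) & bit))
			sched_yield();
		atomic_fetch_and(&ipi_nmi_pending, ~bit);

		/* The kernel would savectx(&stoppcbs[cpu]) here. */
		atomic_fetch_or(&stopped_cpus, bit);

		/* Spin until restarted, then clear both bits, as the patch does. */
		while (!(atomic_load(&started_cpus) & bit))
			sched_yield();
		atomic_fetch_and(&started_cpus, ~bit);
		atomic_fetch_and(&stopped_cpus, ~bit);
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t tid[NCPU];
		unsigned int map = 0;

		/* This thread plays CPU 0 (the debugger); the rest are "other" CPUs. */
		for (unsigned int i = 1; i < NCPU; i++) {
			map |= 1u << i;
			pthread_create(&tid[i], NULL, fake_nmi_handler,
			    (void *)(uintptr_t)i);
		}

		/* stop_cpus_nmi(): publish the mask, then wait for every CPU to ack. */
		atomic_fetch_or(&ipi_nmi_pending, map);
		while ((atomic_load(&stopped_cpus) & map) != map)
			sched_yield();
		printf("CPUs 0x%x stopped; the debugger would run here\n", map);

		/* restart_cpus() equivalent: release the stopped CPUs. */
		atomic_fetch_or(&started_cpus, map);
		for (unsigned int i = 1; i < NCPU; i++)
			pthread_join(tid[i], NULL);
		return (0);
	}

Build the sketch with "cc -pthread". To use the real feature, the patch
expects a kernel built with both "options SMP" and "options KDB_STOP_NMI",
plus the debug.kdb.stop_cpus_with_nmi tunable/sysctl set to a nonzero value
so kdb_trap() calls stop_cpus_nmi() instead of stop_cpus().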
-rw-r--r--  sys/amd64/amd64/mp_machdep.c   74
-rw-r--r--  sys/amd64/amd64/trap.c          8
-rw-r--r--  sys/amd64/conf/NOTES            6
-rw-r--r--  sys/amd64/include/smp.h         5
-rw-r--r--  sys/conf/options.amd64          3
-rw-r--r--  sys/conf/options.i386           3
-rw-r--r--  sys/i386/conf/NOTES             5
-rw-r--r--  sys/i386/i386/mp_machdep.c     74
-rw-r--r--  sys/i386/i386/trap.c            8
-rw-r--r--  sys/i386/include/smp.h          5
-rw-r--r--  sys/kern/subr_kdb.c            50
-rw-r--r--  sys/kern/subr_smp.c            29
-rw-r--r--  sys/sys/smp.h                   4
13 files changed, 274 insertions(+), 0 deletions(-)
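
Before the per-architecture hunks, it may help to see how ipi_nmi_selected()
walks its CPU mask: it repeatedly takes the lowest set bit with ffs(), clears
it, and sends one NMI per CPU. Below is a minimal standalone rendering of just
that loop; the printf stands in for lapic_ipi_raw() and the mask value is made
up for the example.

	#include <stdio.h>
	#include <strings.h>		/* ffs() */

	int
	main(void)
	{
		unsigned int cpus = 0x0e;	/* hypothetical mask: CPUs 1, 2 and 3 */
		int cpu;

		while ((cpu = ffs(cpus)) != 0) {
			cpu--;			/* ffs() is 1-based */
			cpus &= ~(1u << cpu);
			printf("would send an NMI IPI to CPU %d\n", cpu);
		}
		return (0);
	}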
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 9d3bf8f..d1e4cb2 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -192,6 +192,10 @@ mp_topology(void)
}
+#ifdef KDB_STOP_NMI
+volatile cpumask_t ipi_nmi_pending;
+#endif
+
/*
* Calculate usable address in base memory for AP trampoline code.
*/
@@ -972,6 +976,76 @@ ipi_self(u_int ipi)
lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}
+#ifdef KDB_STOP_NMI
+/*
+ * send NMI IPI to selected CPUs
+ */
+
+#define BEFORE_SPIN 1000000
+
+void
+ipi_nmi_selected(u_int32_t cpus)
+{
+
+ int cpu;
+ register_t icrlo;
+
+ icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
+ | APIC_TRIGMOD_EDGE;
+
+ CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
+
+
+ atomic_set_int(&ipi_nmi_pending, cpus);
+
+
+ while ((cpu = ffs(cpus)) != 0) {
+ cpu--;
+ cpus &= ~(1 << cpu);
+
+ KASSERT(cpu_apic_ids[cpu] != -1,
+ ("IPI NMI to non-existent CPU %d", cpu));
+
+ /* Wait for an earlier IPI to finish. */
+ if (!lapic_ipi_wait(BEFORE_SPIN))
+ panic("ipi_nmi_selected: previous IPI has not cleared");
+
+ lapic_ipi_raw(icrlo,cpu_apic_ids[cpu]);
+ }
+}
+
+
+int
+ipi_nmi_handler()
+{
+ int cpu = PCPU_GET(cpuid);
+
+ if(!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
+ return 1;
+
+ atomic_clear_int(&ipi_nmi_pending,1 << cpu);
+
+ savectx(&stoppcbs[cpu]);
+
+ /* Indicate that we are stopped */
+ atomic_set_int(&stopped_cpus,1 << cpu);
+
+
+ /* Wait for restart */
+ while(!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
+ ia32_pause();
+
+ atomic_clear_int(&started_cpus,1 << cpu);
+ atomic_clear_int(&stopped_cpus,1 << cpu);
+
+ if(cpu == 0 && cpustop_restartfunc != NULL)
+ cpustop_restartfunc();
+
+ return 0;
+}
+
+#endif /* KDB_STOP_NMI */
+
/*
* This is called once the rest of the system is up and running and we're
* ready to let the AP's out of the pen.
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 7baaeee..9820f87 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -165,6 +165,14 @@ trap(frame)
PCPU_LAZY_INC(cnt.v_trap);
type = frame.tf_trapno;
+#ifdef KDB_STOP_NMI
+ /* Handler for NMI IPIs used for debugging */
+ if (type == T_NMI) {
+ if (ipi_nmi_handler() == 0)
+ goto out;
+ }
+#endif /* KDB_STOP_NMI */
+
#ifdef KDB
if (kdb_active) {
kdb_reenter();
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index 6677fbe..8978c55 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -33,6 +33,12 @@ device mptable # Optional MPSPEC mptable support
#
options MP_WATCHDOG
+#
+# Debugging options.
+#
+options KDB_STOP_NMI # Stop CPUS using NMI instead of IPI
+
+
#####################################################################
# CPU OPTIONS
diff --git a/sys/amd64/include/smp.h b/sys/amd64/include/smp.h
index 8aea3b9..2c63dc9 100644
--- a/sys/amd64/include/smp.h
+++ b/sys/amd64/include/smp.h
@@ -63,6 +63,11 @@ void smp_masked_invlpg_range(u_int mask, vm_offset_t startva,
void smp_invltlb(void);
void smp_masked_invltlb(u_int mask);
+#ifdef KDB_STOP_NMI
+int ipi_nmi_handler(void);
+void ipi_nmi_selected(u_int32_t cpus);
+#endif
+
#endif /* !LOCORE */
#endif /* SMP */
diff --git a/sys/conf/options.amd64 b/sys/conf/options.amd64
index 82f864d..0227bfd 100644
--- a/sys/conf/options.amd64
+++ b/sys/conf/options.amd64
@@ -55,3 +55,6 @@ PSM_HOOKRESUME opt_psm.h
PSM_RESETAFTERSUSPEND opt_psm.h
PSM_DEBUG opt_psm.h
DEV_ATPIC opt_atpic.h
+
+# Debugging
+KDB_STOP_NMI opt_global.h
diff --git a/sys/conf/options.i386 b/sys/conf/options.i386
index a1dfee6..405614f 100644
--- a/sys/conf/options.i386
+++ b/sys/conf/options.i386
@@ -160,3 +160,6 @@ IPR_LOG opt_i4b.h
DEV_APIC opt_apic.h
DEV_NPX opt_npx.h
ASR_COMPAT opt_asr.h
+
+# Debugging
+KDB_STOP_NMI opt_global.h
diff --git a/sys/i386/conf/NOTES b/sys/i386/conf/NOTES
index 3c34894..82b2c6a 100644
--- a/sys/i386/conf/NOTES
+++ b/sys/i386/conf/NOTES
@@ -51,6 +51,11 @@ options MPTABLE_FORCE_HTT # Enable HTT CPUs with the MP Table
#
options MP_WATCHDOG
+# Debugging options.
+#
+options KDB_STOP_NMI # Stop CPUS using NMI instead of IPI
+
+
#####################################################################
# CPU OPTIONS
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index d601f6d..2fc958c 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -175,6 +175,10 @@ vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;
+#ifdef KDB_STOP_NMI
+volatile cpumask_t ipi_nmi_pending;
+#endif
+
/*
* Local data and functions.
*/
@@ -1191,6 +1195,76 @@ ipi_self(u_int ipi)
lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}
+#ifdef KDB_STOP_NMI
+/*
+ * send NMI IPI to selected CPUs
+ */
+
+#define BEFORE_SPIN 1000000
+
+void
+ipi_nmi_selected(u_int32_t cpus)
+{
+
+ int cpu;
+ register_t icrlo;
+
+ icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
+ | APIC_TRIGMOD_EDGE;
+
+ CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
+
+
+ atomic_set_int(&ipi_nmi_pending, cpus);
+
+
+ while ((cpu = ffs(cpus)) != 0) {
+ cpu--;
+ cpus &= ~(1 << cpu);
+
+ KASSERT(cpu_apic_ids[cpu] != -1,
+ ("IPI NMI to non-existent CPU %d", cpu));
+
+ /* Wait for an earlier IPI to finish. */
+ if (!lapic_ipi_wait(BEFORE_SPIN))
+ panic("ipi_nmi_selected: previous IPI has not cleared");
+
+ lapic_ipi_raw(icrlo,cpu_apic_ids[cpu]);
+ }
+}
+
+
+int
+ipi_nmi_handler()
+{
+ int cpu = PCPU_GET(cpuid);
+
+ if(!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
+ return 1;
+
+ atomic_clear_int(&ipi_nmi_pending,1 << cpu);
+
+ savectx(&stoppcbs[cpu]);
+
+ /* Indicate that we are stopped */
+ atomic_set_int(&stopped_cpus,1 << cpu);
+
+
+ /* Wait for restart */
+ while(!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
+ ia32_pause();
+
+ atomic_clear_int(&started_cpus,1 << cpu);
+ atomic_clear_int(&stopped_cpus,1 << cpu);
+
+ if(cpu == 0 && cpustop_restartfunc != NULL)
+ cpustop_restartfunc();
+
+ return 0;
+}
+
+#endif /* KDB_STOP_NMI */
+
/*
* This is called once the rest of the system is up and running and we're
* ready to let the AP's out of the pen.
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index ae2b4ed..2bca068 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -183,6 +183,14 @@ trap(frame)
PCPU_LAZY_INC(cnt.v_trap);
type = frame.tf_trapno;
+#ifdef KDB_STOP_NMI
+ /* Handler for NMI IPIs used for debugging */
+ if (type == T_NMI) {
+ if (ipi_nmi_handler() == 0)
+ goto out;
+ }
+#endif /* KDB_STOP_NMI */
+
#ifdef KDB
if (kdb_active) {
kdb_reenter();
diff --git a/sys/i386/include/smp.h b/sys/i386/include/smp.h
index 7837120..4c9ae76 100644
--- a/sys/i386/include/smp.h
+++ b/sys/i386/include/smp.h
@@ -79,6 +79,11 @@ void smp_masked_invlpg_range(u_int mask, vm_offset_t startva,
void smp_invltlb(void);
void smp_masked_invltlb(u_int mask);
+#ifdef KDB_STOP_NMI
+int ipi_nmi_handler(void);
+void ipi_nmi_selected(u_int32_t cpus);
+#endif
+
#endif /* !LOCORE */
#endif /* SMP */
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 54274db..5726a84 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -40,6 +40,18 @@ __FBSDID("$FreeBSD$");
#include <machine/kdb.h>
#include <machine/pcb.h>
+#ifdef KDB_STOP_NMI
+#include <machine/smp.h>
+#endif
+
+/*
+ * KDB_STOP_NMI requires SMP to pick up the right dependencies
+ * (And isn't useful on UP anyway)
+ */
+#if defined(KDB_STOP_NMI) && !defined(SMP)
+#error "options KDB_STOP_NMI" requires "options SMP"
+#endif
+
int kdb_active = 0;
void *kdb_jmpbufp = NULL;
struct kdb_dbbe *kdb_dbbe = NULL;
@@ -77,6 +89,19 @@ static int kdb_stop_cpus = 1;
SYSCTL_INT(_debug_kdb, OID_AUTO, stop_cpus, CTLTYPE_INT | CTLFLAG_RW,
&kdb_stop_cpus, 0, "");
TUNABLE_INT("debug.kdb.stop_cpus", &kdb_stop_cpus);
+
+#ifdef KDB_STOP_NMI
+/*
+ * Provide an alternate method of stopping other CPUs. If another CPU has
+ * disabled interrupts the conventional STOP IPI will be blocked. This
+ * NMI-based stop should get through in that case.
+ */
+static int kdb_stop_cpus_with_nmi = 0;
+SYSCTL_INT(_debug_kdb, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
+ &kdb_stop_cpus_with_nmi, 0, "");
+TUNABLE_INT("debug.kdb.stop_cpus_with_nmi", &kdb_stop_cpus_with_nmi);
+#endif /* KDB_STOP_NMI */
+
#endif
static int
@@ -308,9 +333,27 @@ kdb_reenter(void)
struct pcb *
kdb_thr_ctx(struct thread *thr)
+#ifdef KDB_STOP_NMI
+{
+ u_int cpuid;
+ struct pcpu *pc;
+
+ if (thr == curthread)
+ return &kdb_pcb;
+
+ SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ cpuid = pc->pc_cpuid;
+ if (pc->pc_curthread == thr && (atomic_load_acq_int(&stopped_cpus) & (1 << cpuid)))
+ return &stoppcbs[cpuid];
+ }
+
+ return thr->td_pcb;
+}
+#else
{
return ((thr == curthread) ? &kdb_pcb : thr->td_pcb);
}
+#endif /* KDB_STOP_NMI */
struct thread *
kdb_thr_first(void)
@@ -407,7 +450,14 @@ kdb_trap(int type, int code, struct trapframe *tf)
#ifdef SMP
if ((did_stop_cpus = kdb_stop_cpus) != 0)
+ {
+#ifdef KDB_STOP_NMI
+ if(kdb_stop_cpus_with_nmi)
+ stop_cpus_nmi(PCPU_GET(other_cpus));
+ else
+#endif /* KDB_STOP_NMI */
stop_cpus(PCPU_GET(other_cpus));
+ }
#endif
kdb_frame = tf;
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index aed3118..3d08ad8 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -254,6 +254,35 @@ stop_cpus(cpumask_t map)
return 1;
}
+#ifdef KDB_STOP_NMI
+int
+stop_cpus_nmi(cpumask_t map)
+{
+ int i;
+
+ if (!smp_started)
+ return 0;
+
+ CTR1(KTR_SMP, "stop_cpus(%x)", map);
+
+ /* send the stop IPI to all CPUs in map */
+ ipi_nmi_selected(map);
+
+ i = 0;
+ while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
+ /* spin */
+ i++;
+#ifdef DIAGNOSTIC
+ if (i == 100000) {
+ printf("timeout stopping cpus\n");
+ break;
+ }
+#endif
+ }
+
+ return 1;
+}
+#endif /* KDB_STOP_NMI */
/*
* Called by a CPU to restart stopped CPUs.
diff --git a/sys/sys/smp.h b/sys/sys/smp.h
index 88353fc..88c6ae2 100644
--- a/sys/sys/smp.h
+++ b/sys/sys/smp.h
@@ -101,6 +101,10 @@ int restart_cpus(cpumask_t);
int stop_cpus(cpumask_t);
void smp_rendezvous_action(void);
extern struct mtx smp_ipi_mtx;
+
+#ifdef KDB_STOP_NMI
+int stop_cpus_nmi(cpumask_t);
+#endif
#endif /* SMP */
void smp_rendezvous(void (*)(void *),
void (*)(void *),