author		markj <markj@FreeBSD.org>	2015-09-11 03:54:37 +0000
committer	markj <markj@FreeBSD.org>	2015-09-11 03:54:37 +0000
commit		e8967c8bd970aade474f6148fbbe4b0e6183751a (patch)
tree		28426773e296f84e10d3d95e2a1489974f368b93 /sys/x86
parent		b30bc0e211f2c169b84e83127d7d57de24fb688d (diff)
Add stack_save_td_running(), a function to trace the kernel stack of a
running thread.

It is currently implemented only on amd64 and i386; on these
architectures, it is implemented by raising an NMI on the CPU on which
the target thread is currently running.  Unlike stack_save_td(), it may
fail, for example if the thread is running in user mode.

This change also modifies the kern.proc.kstack sysctl to use this
function, so that stacks of running threads are shown in the output of
"procstat -kk".  This is handy for debugging threads that are stuck in
a busy loop.

Reviewed by:	bdrewery, jhb, kib
Sponsored by:	EMC / Isilon Storage Division
Differential Revision:	https://reviews.freebsd.org/D3256
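As a usage illustration (not part of this commit): a caller might wrap the
new interface as sketched below.  The helper trace_thread() is hypothetical;
the locking requirement and the EAGAIN failure mode follow from the code
added in this diff.

    #include <sys/param.h>
    #include <sys/proc.h>
    #include <sys/stack.h>

    /*
     * Hypothetical caller: capture the kernel stack of a thread that
     * may be on-CPU.  stack_save_td_running() can fail with EAGAIN
     * (e.g. the target was executing in user mode when the NMI
     * arrived), so the caller must be prepared to retry or to skip
     * the thread.
     */
    static int
    trace_thread(struct thread *td, struct stack *st)
    {
            int error;

            thread_lock(td);        /* stack_save_td_running() asserts this */
            if (TD_IS_RUNNING(td))
                    error = stack_save_td_running(st, td);
            else {
                    stack_save_td(st, td);
                    error = 0;
            }
            thread_unlock(td);
            return (error);
    }

With the sysctl change in place, "procstat -kk <pid>" can report a stack
even for threads that are running at the time of the query, which is what
makes busy-looping threads debuggable.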
Diffstat (limited to 'sys/x86')
-rw-r--r--  sys/x86/include/apicvar.h     4
-rw-r--r--  sys/x86/include/stack.h       4
-rw-r--r--  sys/x86/x86/local_apic.c      7
-rw-r--r--  sys/x86/x86/mp_x86.c         12
-rw-r--r--  sys/x86/x86/stack_machdep.c  68
5 files changed, 82 insertions, 13 deletions
diff --git a/sys/x86/include/apicvar.h b/sys/x86/include/apicvar.h
index 0bd9fe5..58fcced 100644
--- a/sys/x86/include/apicvar.h
+++ b/sys/x86/include/apicvar.h
@@ -129,12 +129,14 @@
#else
#define IPI_DYN_FIRST (APIC_IPI_INTS + 8)
#endif
-#define IPI_DYN_LAST (254) /* IPIs allocated at runtime */
+#define IPI_DYN_LAST (253) /* IPIs allocated at runtime */
/*
* IPI_STOP_HARD does not need to occupy a slot in the IPI vector space since
* it is delivered using an NMI anyways.
*/
+#define IPI_NMI_FIRST 254
+#define IPI_TRACE 254 /* Interrupt for tracing. */
#define IPI_STOP_HARD 255 /* Stop CPU with a NMI. */
/*
diff --git a/sys/x86/include/stack.h b/sys/x86/include/stack.h
index 3489e42..7f6930a 100644
--- a/sys/x86/include/stack.h
+++ b/sys/x86/include/stack.h
@@ -54,4 +54,8 @@ struct i386_frame {
};
#endif /* __amd64__ */
+#ifdef _KERNEL
+int stack_nmi_handler(struct trapframe *);
+#endif
+
#endif /* !_X86_STACK_H */
diff --git a/sys/x86/x86/local_apic.c b/sys/x86/x86/local_apic.c
index 8198971..106a842 100644
--- a/sys/x86/x86/local_apic.c
+++ b/sys/x86/x86/local_apic.c
@@ -1703,11 +1703,10 @@ native_lapic_ipi_vectored(u_int vector, int dest)
icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;
/*
- * IPI_STOP_HARD is just a "fake" vector used to send a NMI.
- * Use special rules regard NMI if passed, otherwise specify
- * the vector.
+ * NMI IPIs are just fake vectors used to send a NMI. Use special rules
+ * regarding NMIs if passed, otherwise specify the vector.
*/
- if (vector == IPI_STOP_HARD)
+ if (vector >= IPI_NMI_FIRST)
icrlo |= APIC_DELMODE_NMI;
else
icrlo |= vector | APIC_DELMODE_FIXED;
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
index c23108c..9e1cec2 100644
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -120,7 +120,7 @@ struct cpu_ops cpu_ops;
* Local data and functions.
*/
-static volatile cpuset_t ipi_nmi_pending;
+static volatile cpuset_t ipi_stop_nmi_pending;
/* used to hold the AP's until we are ready to release them */
struct mtx ap_boot_mtx;
@@ -894,7 +894,7 @@ ipi_selected(cpuset_t cpus, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);
+ CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &cpus);
while ((cpu = CPU_FFS(&cpus)) != 0) {
cpu--;
@@ -917,7 +917,7 @@ ipi_cpu(int cpu, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);
+ CPU_SET_ATOMIC(cpu, &ipi_stop_nmi_pending);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
@@ -944,7 +944,7 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
+ CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &other_cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@@ -962,10 +962,10 @@ ipi_nmi_handler()
* and should be handled.
*/
cpuid = PCPU_GET(cpuid);
- if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
+ if (!CPU_ISSET(cpuid, &ipi_stop_nmi_pending))
return (1);
- CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
+ CPU_CLR_ATOMIC(cpuid, &ipi_stop_nmi_pending);
cpustop_handler();
return (0);
}
diff --git a/sys/x86/x86/stack_machdep.c b/sys/x86/x86/stack_machdep.c
index 3ebf5a9..a56d423 100644
--- a/sys/x86/x86/stack_machdep.c
+++ b/sys/x86/x86/stack_machdep.c
@@ -1,4 +1,5 @@
/*-
+ * Copyright (c) 2015 EMC Corporation
* Copyright (c) 2005 Antoine Brodin
* All rights reserved.
*
@@ -29,17 +30,21 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/stack.h>
-#include <x86/stack.h>
-
#include <machine/pcb.h>
+#include <machine/smp.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
+#include <x86/stack.h>
+
#ifdef __i386__
#define PCB_FP(pcb) ((pcb)->pcb_ebp)
#define TF_FP(tf) ((tf)->tf_ebp)
@@ -54,6 +59,14 @@ typedef struct i386_frame *x86_frame_t;
typedef struct amd64_frame *x86_frame_t;
#endif
+static struct stack *nmi_stack;
+static volatile struct thread *nmi_pending;
+
+#ifdef SMP
+static struct mtx nmi_lock;
+MTX_SYSINIT(nmi_lock, &nmi_lock, "stack_nmi", MTX_SPIN);
+#endif
+
static void
stack_capture(struct thread *td, struct stack *st, register_t fp)
{
@@ -78,6 +91,24 @@ stack_capture(struct thread *td, struct stack *st, register_t fp)
}
}
+int
+stack_nmi_handler(struct trapframe *tf)
+{
+
+ /* Don't consume an NMI that wasn't meant for us. */
+ if (nmi_stack == NULL || curthread != nmi_pending)
+ return (0);
+
+ if (INKERNEL(TF_PC(tf)))
+ stack_capture(curthread, nmi_stack, TF_FP(tf));
+ else
+ /* We interrupted a thread in user mode. */
+ nmi_stack->depth = 0;
+
+ atomic_store_rel_ptr((long *)&nmi_pending, (long)NULL);
+ return (1);
+}
+
void
stack_save_td(struct stack *st, struct thread *td)
{
@@ -90,6 +121,39 @@ stack_save_td(struct stack *st, struct thread *td)
stack_capture(td, st, PCB_FP(td->td_pcb));
}
+int
+stack_save_td_running(struct stack *st, struct thread *td)
+{
+
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ MPASS(TD_IS_RUNNING(td));
+
+ if (td == curthread) {
+ stack_save(st);
+ return (0);
+ }
+
+#ifdef SMP
+ mtx_lock_spin(&nmi_lock);
+
+ nmi_stack = st;
+ nmi_pending = td;
+ ipi_cpu(td->td_oncpu, IPI_TRACE);
+ while ((void *)atomic_load_acq_ptr((long *)&nmi_pending) != NULL)
+ cpu_spinwait();
+ nmi_stack = NULL;
+
+ mtx_unlock_spin(&nmi_lock);
+
+ if (st->depth == 0)
+ /* We interrupted a thread in user mode. */
+ return (EAGAIN);
+#else
+ KASSERT(0, ("curthread isn't running"));
+#endif
+ return (0);
+}
+
void
stack_save(struct stack *st)
{
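For reference, the new stack_machdep.c code above boils down to a small
cross-CPU request/acknowledge handshake between stack_save_td_running()
and stack_nmi_handler().  A minimal userland analogue of that protocol,
using C11 atomics in place of the kernel's atomic_load_acq_ptr() and
atomic_store_rel_ptr() (all names below are illustrative, not from the
commit):

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic(void *) pending; /* plays the role of nmi_pending */
    static void *request_buf;       /* plays the role of nmi_stack */

    /*
     * Requester side (cf. stack_save_td_running): publish the request,
     * notify the target (the IPI in the kernel), then spin until the
     * target acknowledges by clearing the pending pointer.
     */
    void
    request(void *buf, void *target)
    {
            request_buf = buf;
            /* Release: request_buf is visible before the request is seen. */
            atomic_store_explicit(&pending, target, memory_order_release);
            /* ... deliver the notification to the target here ... */
            while (atomic_load_explicit(&pending, memory_order_acquire) != NULL)
                    ;       /* cpu_spinwait() in the kernel */
            request_buf = NULL;
    }

    /*
     * Target side (cf. stack_nmi_handler): returns 1 if the event was
     * ours, 0 if it should be passed on to other NMI handlers.
     */
    int
    handle(void *self)
    {
            if (atomic_load_explicit(&pending, memory_order_acquire) != self)
                    return (0);
            /* ... fill in request_buf with the captured state ... */
            atomic_store_explicit(&pending, NULL, memory_order_release);
            return (1);
    }

The spin mutex (nmi_lock) in the kernel version serializes concurrent
requesters so that only one request is outstanding at a time, which is
why a single pending pointer and a single stack buffer suffice.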