Diffstat (limited to 'kernel')
 kernel/kexec_core.c      |  1 +
 kernel/panic.c           |  6 +++++-
 kernel/printk/internal.h |  2 ++
 kernel/printk/nmi.c      | 39 ++++++++++++++++++++++++++++++++++++++-
 kernel/printk/printk.c   |  2 +-
 5 files changed, 47 insertions(+), 3 deletions(-)
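This patch flushes the per-CPU NMI printk buffers into the main ring buffer when the system goes down, so that messages printed from NMI context (for example, backtraces from the hardlockup detector) make it into the crashdump and into the kmsg_dump() output. The diffstat above is limited to kernel/, so the public declaration of the new helper is not shown; a minimal sketch of what it presumably looks like, assuming the usual placement in include/linux/printk.h and an assumed no-op stub for !CONFIG_PRINTK_NMI builds:

#ifdef CONFIG_PRINTK_NMI
extern void printk_nmi_flush_on_panic(void);
#else
/* Assumed stub so the callers below compile without CONFIG_PRINTK_NMI. */
static inline void printk_nmi_flush_on_panic(void)
{
}
#endif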
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 1c03dfb..d5d4082 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -893,6 +893,7 @@ void crash_kexec(struct pt_regs *regs)
old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
if (old_cpu == PANIC_CPU_INVALID) {
/* This is the first CPU to get here, so go ahead. */
+ printk_nmi_flush_on_panic();
__crash_kexec(regs);
/*
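The flush is placed right before __crash_kexec() because the crash kernel captures the old kernel's ring buffer in the vmcore; anything still sitting in the per-CPU NMI buffers at that point would be missing from the dump. The panic_cpu compare-and-exchange above guarantees that only one CPU gets this far. A condensed sketch of the resulting function, not a literal copy of kernel/kexec_core.c:

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/* Only one CPU is allowed to execute the crash_kexec() path. */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* Merge the per-CPU NMI buffers before taking the dump. */
		printk_nmi_flush_on_panic();
		__crash_kexec(regs);
		/* Reset so a later panic() can proceed if kexec did not run. */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}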
diff --git a/kernel/panic.c b/kernel/panic.c
index 535c965..8aa7449 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -160,8 +160,10 @@ void panic(const char *fmt, ...)
*
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
- if (!crash_kexec_post_notifiers)
+ if (!crash_kexec_post_notifiers) {
+ printk_nmi_flush_on_panic();
__crash_kexec(NULL);
+ }
/*
* Note smp_send_stop is the usual smp shutdown function, which
@@ -176,6 +178,8 @@ void panic(const char *fmt, ...)
*/
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+ /* Call the flush even twice; it tries harder when only a single CPU is online. */
+ printk_nmi_flush_on_panic();
kmsg_dump(KMSG_DUMP_PANIC);
/*
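Together the two call sites bracket smp_send_stop(): the first flush runs before the early __crash_kexec(), possibly with other CPUs still online; the second runs after the secondary CPUs have been stopped, when the flush can recover even a logbuf_lock that was left locked (see the nmi.c change below). A condensed, non-literal sketch of the resulting ordering in panic():

	if (!crash_kexec_post_notifiers) {
		printk_nmi_flush_on_panic();	/* other CPUs may be online */
		__crash_kexec(NULL);
	}

	smp_send_stop();			/* secondary CPUs stop here */

	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	printk_nmi_flush_on_panic();		/* single CPU now: may re-init
						 * a locked logbuf_lock */
	kmsg_dump(KMSG_DUMP_PANIC);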
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 341bedc..7fd2838 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -22,6 +22,8 @@ int __printf(1, 0) vprintk_default(const char *fmt, va_list args);
#ifdef CONFIG_PRINTK_NMI
+extern raw_spinlock_t logbuf_lock;
+
/*
* printk() cannot take logbuf_lock in NMI context. Instead,
* it temporarily stores the strings into a per-CPU buffer.
diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
index bf08557..b69eb8a 100644
--- a/kernel/printk/nmi.c
+++ b/kernel/printk/nmi.c
@@ -17,6 +17,7 @@
#include <linux/preempt.h>
#include <linux/spinlock.h>
+#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
@@ -106,7 +107,16 @@ static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
{
const char *buf = s->buffer + start;
- printk("%.*s", (end - start) + 1, buf);
+ /*
+ * The buffers are flushed in NMI context only on panic. The
+ * messages must go only into the ring buffer at this stage; the
+ * consoles are called explicitly later, when no crashdump is
+ * generated.
+ */
+ if (in_nmi())
+ printk_deferred("%.*s", (end - start) + 1, buf);
+ else
+ printk("%.*s", (end - start) + 1, buf);
}
/*
@@ -194,6 +204,33 @@ void printk_nmi_flush(void)
__printk_nmi_flush(&per_cpu(nmi_print_seq, cpu).work);
}
+/**
+ * printk_nmi_flush_on_panic - flush all per-cpu nmi buffers when the system
+ * goes down.
+ *
+ * Similar to printk_nmi_flush() but it can be called even in NMI context when
+ * the system goes down. It makes a best effort to get the NMI messages into
+ * the main ring buffer.
+ *
+ * Note that it can try harder when there is only one CPU online.
+ */
+void printk_nmi_flush_on_panic(void)
+{
+ /*
+ * Make sure that we can access the main ring buffer.
+ * Do not risk a double release when more CPUs are up.
+ */
+ if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) {
+ if (num_online_cpus() > 1)
+ return;
+
+ debug_locks_off();
+ raw_spin_lock_init(&logbuf_lock);
+ }
+
+ printk_nmi_flush();
+}
+
void __init printk_nmi_init(void)
{
int cpu;
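The subtle part of printk_nmi_flush_on_panic() is the raw_spin_lock_init() call: if the NMI interrupted a printk() that held logbuf_lock, taking the lock again would spin forever, since the interrupted owner can never resume. With exactly one CPU online, that owner must be this CPU, so re-initializing (effectively stealing) the lock is safe. With more CPUs online, another CPU may legitimately hold the lock, and a re-init would act as a double release, hence the early return. The scenario being handled, sketched as a timeline (the hardlockup-watchdog trigger is illustrative):

/*
 * CPU0 (the only online CPU)
 * --------------------------
 * printk()
 *   raw_spin_lock(&logbuf_lock);          <-- lock taken
 *   ... hardlockup NMI fires ...
 *     panic()
 *       printk_nmi_flush_on_panic()
 *         in_nmi() && raw_spin_is_locked(&logbuf_lock)  -> true
 *         num_online_cpus() == 1  -> the owner is this CPU and will
 *                                    never resume, so stealing is safe
 *         debug_locks_off();              <-- avoid lockdep splats
 *         raw_spin_lock_init(&logbuf_lock);
 *       printk_nmi_flush()                <-- can take logbuf_lock now
 */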
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e38579d..60cdf63 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -245,7 +245,7 @@ __packed __aligned(4)
* within the scheduler's rq lock. It must be released before calling
* console_unlock() or anything else that might wake up a process.
*/
-static DEFINE_RAW_SPINLOCK(logbuf_lock);
+DEFINE_RAW_SPINLOCK(logbuf_lock);
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
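Dropping the static qualifier gives logbuf_lock external linkage so that the extern declaration added to kernel/printk/internal.h resolves; the lock stays effectively private to the printk subsystem, since internal.h is not a public header. Paraphrased, the three pieces line up as:

/* kernel/printk/printk.c */
DEFINE_RAW_SPINLOCK(logbuf_lock);

/* kernel/printk/internal.h, under CONFIG_PRINTK_NMI */
extern raw_spinlock_t logbuf_lock;

/* kernel/printk/nmi.c */
if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) { /* ... */ }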