--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -1,5 +1,5 @@
 /*
- * printk_safe.c - Safe printk in NMI context
+ * printk_safe.c - Safe printk for printk-deadlock-prone contexts
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -32,13 +32,13 @@
  * is later flushed into the main ring buffer via IRQ work.
  *
  * The alternative implementation is chosen transparently
- * via @printk_func per-CPU variable.
+ * by examining the current printk() context mask stored in the
+ * @printk_context per-CPU variable.
  *
  * The implementation allows to flush the strings also from another CPU.
  * There are situations when we want to make sure that all buffers
  * were handled or when IRQs are blocked.
  */
-DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
 static int printk_safe_irq_ready;
 atomic_t nmi_message_lost;
 
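The @printk_context bits tested later in the patch are defined outside this hunk, in kernel/printk/internal.h. A plausible layout, stated here as an assumption, keeps the low bits as a nesting counter for printk-safe sections (which is why __printk_safe_enter()/__printk_safe_exit() below can use this_cpu_inc()/this_cpu_dec()) and a single high bit as the NMI flag (set and cleared with this_cpu_or()/this_cpu_and()):

    /* Assumed mask layout (sketch, not part of this patch): */
    #define PRINTK_SAFE_CONTEXT_MASK        0x7fffffff      /* nesting counter */
    #define PRINTK_NMI_CONTEXT_MASK         0x80000000      /* single NMI flag bit */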
@@ -50,18 +50,28 @@ struct printk_safe_seq_buf {
         struct irq_work work;   /* IRQ work that flushes the buffer */
         unsigned char   buffer[SAFE_LOG_BUF_LEN];
 };
+
+static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
+static DEFINE_PER_CPU(int, printk_context);
+
+#ifdef CONFIG_PRINTK_NMI
 static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
+#endif
 
 /*
- * Safe printk() for NMI context. It uses a per-CPU buffer to
- * store the message. NMIs are not nested, so there is always only
- * one writer running. But the buffer might get flushed from another
- * CPU, so we need to be careful.
+ * Add a message to the per-CPU context-dependent buffer. NMI and printk-safe
+ * contexts have dedicated buffers, because otherwise printk-safe preempted
+ * by NMI-printk would overwrite the NMI messages.
+ *
+ * The messages are flushed from IRQ work (or from panic()), possibly
+ * from another CPU, concurrently with printk_safe_log_store(). Should this
+ * happen, printk_safe_log_store() will notice the buffer->len mismatch
+ * and repeat the write.
  */
-static int vprintk_nmi(const char *fmt, va_list args)
+static int printk_safe_log_store(struct printk_safe_seq_buf *s,
+                                 const char *fmt, va_list args)
 {
-        struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
-        int add = 0;
+        int add;
         size_t len;
 
 again:
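The part of printk_safe_log_store() between these two hunks is unchanged and therefore elided. For orientation, the retry described in the comment above boils down to a pattern like this, a sketch assuming s->len is an atomic_t (as the cmpxchg-based flushing implies):

        len = atomic_read(&s->len);             /* snapshot current fill */

        if (len >= sizeof(s->buffer) - 1) {     /* buffer full: drop message */
                atomic_inc(&nmi_message_lost);
                return 0;
        }

        if (!len)                               /* see barrier comment below */
                smp_rmb();

        add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);

        /*
         * A concurrent flush may have emptied the buffer meanwhile; the
         * failed cmpxchg detects that and the message is written again.
         */
        if (atomic_cmpxchg(&s->len, len, len + add) != len)
                goto again;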
@@ -74,8 +84,8 @@ again:
         }
 
         /*
-         * Make sure that all old data have been read before the buffer was
-         * reseted. This is not needed when we just append data.
+         * Make sure that all old data have been read before the buffer
+         * was reset. This is not needed when we just append data.
          */
         if (!len)
                 smp_rmb();
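This smp_rmb() pairs with an ordering guarantee on the flush side: before a flusher resets s->len to zero it must have finished reading the old bytes, which an atomic cmpxchg (an implied full barrier) provides. Sketched side by side, with the flush-side detail being an assumption from the surrounding design:

    /*
     *   writer (printk_safe_log_store)      flusher (__printk_safe_flush)
     *
     *   len = atomic_read(&s->len);         ... copy s->buffer[0..len) ...
     *   if (!len)                           atomic_cmpxchg(&s->len, len, 0);
     *           smp_rmb();   <--- pairs with the cmpxchg's full barrier
     *   ... write s->buffer[len..] ...
     */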
@@ -161,7 +171,7 @@ static int printk_safe_flush_buffer(const char *start, size_t len)
 }
 
 /*
- * Flush data from the associated per_CPU buffer. The function
+ * Flush data from the associated per-CPU buffer. The function
  * can be called either via IRQ work or independently.
  */
 static void __printk_safe_flush(struct irq_work *work)
@@ -231,8 +241,12 @@ void printk_safe_flush(void)
 {
         int cpu;
 
-        for_each_possible_cpu(cpu)
+        for_each_possible_cpu(cpu) {
+#ifdef CONFIG_PRINTK_NMI
                 __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
+#endif
+                __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
+        }
 }
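A usage sketch for printk_safe_flush(): a path that has just made remote CPUs print from NMI can drain their buffers immediately instead of waiting for the IRQ work to run. The caller shown is illustrative:

        /* Hypothetical debug path: */
        trigger_all_cpu_backtrace();    /* remote handlers printk() from NMI */
        printk_safe_flush();            /* pull every CPU's buffered text out */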
 
 /**
@@ -262,14 +276,88 @@ void printk_safe_flush_on_panic(void)
         printk_safe_flush();
 }
 
+#ifdef CONFIG_PRINTK_NMI
+/*
+ * Safe printk() for NMI context. It uses a per-CPU buffer to
+ * store the message. NMIs are not nested, so there is always only
+ * one writer running. But the buffer might get flushed from another
+ * CPU, so we need to be careful.
+ */
+static int vprintk_nmi(const char *fmt, va_list args)
+{
+        struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+
+        return printk_safe_log_store(s, fmt, args);
+}
+
+void printk_nmi_enter(void)
+{
+        this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+}
+
+void printk_nmi_exit(void)
+{
+        this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+}
+
+#else
+
+static int vprintk_nmi(const char *fmt, va_list args)
+{
+        return 0;
+}
+
+#endif /* CONFIG_PRINTK_NMI */
+
+/*
+ * Lock-less printk(), to avoid deadlocks should printk() recurse
+ * into itself. It uses a per-CPU buffer to store the message, just
+ * like the NMI variant.
+ */
+static int vprintk_safe(const char *fmt, va_list args)
+{
+        struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);
+
+        return printk_safe_log_store(s, fmt, args);
+}
+
+/* Can be preempted by NMI. */
+void __printk_safe_enter(void)
+{
+        this_cpu_inc(printk_context);
+}
+
+/* Can be preempted by NMI. */
+void __printk_safe_exit(void)
+{
+        this_cpu_dec(printk_context);
+}
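Callers are presumably expected to pair these with local IRQ control, so a printk-safe section cannot be re-entered from an interrupt on the same CPU. Wrapper macros along these lines would be the natural interface; the names are an assumption, mirroring what kernel/printk/internal.h would carry:

    /* Assumed convenience wrappers (sketch, not part of this hunk): */
    #define printk_safe_enter_irqsave(flags)        \
            do {                                    \
                    local_irq_save(flags);          \
                    __printk_safe_enter();          \
            } while (0)

    #define printk_safe_exit_irqrestore(flags)      \
            do {                                    \
                    __printk_safe_exit();           \
                    local_irq_restore(flags);       \
            } while (0)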
+
+__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+{
+        if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+                return vprintk_nmi(fmt, args);
+
+        if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
+                return vprintk_safe(fmt, args);
+
+        return vprintk_default(fmt, args);
+}
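With vprintk_func() as the single dispatch point, the printk() entry itself can stay a thin wrapper. A sketch of what the call site would look like, simplified and not part of this patch:

    asmlinkage __visible int printk(const char *fmt, ...)
    {
            va_list args;
            int r;

            va_start(args, fmt);
            r = vprintk_func(fmt, args);    /* NMI, printk-safe, or default */
            va_end(args);

            return r;
    }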
+
 void __init printk_safe_init(void)
 {
         int cpu;
 
         for_each_possible_cpu(cpu) {
-                struct printk_safe_seq_buf *s = &per_cpu(nmi_print_seq, cpu);
+                struct printk_safe_seq_buf *s;
+
+                s = &per_cpu(safe_print_seq, cpu);
+                init_irq_work(&s->work, __printk_safe_flush);
 
+#ifdef CONFIG_PRINTK_NMI
+                s = &per_cpu(nmi_print_seq, cpu);
                 init_irq_work(&s->work, __printk_safe_flush);
+#endif
         }
 
         /* Make sure that IRQ works are initialized before enabling. */
@@ -279,13 +367,3 @@ void __init printk_safe_init(void)
         /* Flush pending messages that did not have scheduled IRQ works. */
         printk_safe_flush();
 }
-
-void printk_nmi_enter(void)
-{
-        this_cpu_write(printk_func, vprintk_nmi);
-}
-
-void printk_nmi_exit(void)
-{
-        this_cpu_write(printk_func, vprintk_default);
-}
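Finally, printk_nmi_enter()/printk_nmi_exit() are meant to bracket the NMI path so that any printk() issued by the handler is routed into the per-CPU NMI buffer, typically via the generic nmi_enter()/nmi_exit() helpers rather than by the handler itself. A hypothetical handler illustrating the flow:

    /* Hypothetical NMI handler, simplified for illustration: */
    void do_nmi(struct pt_regs *regs)
    {
            printk_nmi_enter();     /* normally done inside nmi_enter() */
            pr_warn("NMI on CPU %d\n", smp_processor_id()); /* buffered, lock-less */
            printk_nmi_exit();      /* normally done inside nmi_exit() */
    }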