printk: report lost messages in printk safe/nmi contexts

Account lost messages in printk-safe and printk-safe-nmi
contexts and report those numbers during printk_safe_flush().

The patch also moves the lost-message counter into struct
printk_safe_seq_buf instead of keeping dedicated static
counters; this simplifies the code.
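
In short, every per-CPU buffer now carries its own counter:
printk_safe_log_store() bumps it when a message does not fit into
the buffer, and the flush path drains and reports it. A condensed
sketch of the new pieces (trimmed from the hunks below for
illustration, not the literal diff):

	struct printk_safe_seq_buf {
		atomic_t	len;		/* length of written data */
		atomic_t	message_lost;	/* messages dropped on overflow */
		struct irq_work	work;		/* IRQ work that flushes the buffer */
		unsigned char	buffer[SAFE_LOG_BUF_LEN];
	};

	/* Called from __printk_safe_flush() once the buffer is drained. */
	static void report_message_lost(struct printk_safe_seq_buf *s)
	{
		int lost = atomic_xchg(&s->message_lost, 0);

		if (lost)
			printk_deferred("Lost %d message(s)!\n", lost);
	}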

Link: http://lkml.kernel.org/r/20161227141611.940-6-sergey.senozhatsky@gmail.com
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Calvin Owens <calvinowens@fb.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>
3 changed files with 28 additions and 37 deletions

@@ -16,23 +16,6 @@
  */
 #include <linux/percpu.h>
 
-#ifdef CONFIG_PRINTK_NMI
-
-extern atomic_t nmi_message_lost;
-static inline int get_nmi_message_lost(void)
-{
-	return atomic_xchg(&nmi_message_lost, 0);
-}
-
-#else /* CONFIG_PRINTK_NMI */
-
-static inline int get_nmi_message_lost(void)
-{
-	return 0;
-}
-
-#endif /* CONFIG_PRINTK_NMI */
-
 #ifdef CONFIG_PRINTK
 
 #define PRINTK_SAFE_CONTEXT_MASK	0x7fffffff

@@ -1677,7 +1677,6 @@ asmlinkage int vprintk_emit(int facility, int level,
 	unsigned long flags;
 	int this_cpu;
 	int printed_len = 0;
-	int nmi_message_lost;
 	bool in_sched = false;
 	/* cpu currently holding logbuf_lock in this function */
 	static unsigned int logbuf_cpu = UINT_MAX;
@@ -1728,15 +1727,6 @@ asmlinkage int vprintk_emit(int facility, int level,
 					 strlen(recursion_msg));
 	}
 
-	nmi_message_lost = get_nmi_message_lost();
-	if (unlikely(nmi_message_lost)) {
-		text_len = scnprintf(textbuf, sizeof(textbuf),
-				     "BAD LUCK: lost %d message(s) from NMI context!",
-				     nmi_message_lost);
-		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
-					 NULL, 0, textbuf, text_len);
-	}
-
 	/*
 	 * The printf needs to come first; we need the syslog
 	 * prefix which might be passed-in as a parameter.

@@ -40,13 +40,15 @@
  * were handled or when IRQs are blocked.
  */
 static int printk_safe_irq_ready;
-atomic_t nmi_message_lost;
 
 #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
-				sizeof(atomic_t) - sizeof(struct irq_work))
+				sizeof(atomic_t) -			\
+				sizeof(atomic_t) -			\
+				sizeof(struct irq_work))
 
 struct printk_safe_seq_buf {
 	atomic_t		len;	/* length of written data */
+	atomic_t		message_lost;
 	struct irq_work		work;	/* IRQ work that flushes the buffer */
 	unsigned char		buffer[SAFE_LOG_BUF_LEN];
 };
@@ -58,6 +60,16 @@ static DEFINE_PER_CPU(int, printk_context);
 static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
 #endif
 
+/* Get flushed in a more safe context. */
+static void queue_flush_work(struct printk_safe_seq_buf *s)
+{
+	if (printk_safe_irq_ready) {
+		/* Make sure that IRQ work is really initialized. */
+		smp_rmb();
+		irq_work_queue(&s->work);
+	}
+}
+
 /*
  * Add a message to per-CPU context-dependent buffer. NMI and printk-safe
  * have dedicated buffers, because otherwise printk-safe preempted by
@@ -79,7 +91,8 @@ static int printk_safe_log_store(struct printk_safe_seq_buf *s,
 
 	/* The trailing '\0' is not counted into len. */
 	if (len >= sizeof(s->buffer) - 1) {
-		atomic_inc(&nmi_message_lost);
+		atomic_inc(&s->message_lost);
+		queue_flush_work(s);
 		return 0;
 	}
 
@@ -91,6 +104,8 @@ static int printk_safe_log_store(struct printk_safe_seq_buf *s,
 	smp_rmb();
 
 	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
+	if (!add)
+		return 0;
 
 	/*
 	 * Do it once again if the buffer has been flushed in the meantime.
@@ -100,13 +115,7 @@ static int printk_safe_log_store(struct printk_safe_seq_buf *s,
 	if (atomic_cmpxchg(&s->len, len, len + add) != len)
 		goto again;
 
-	/* Get flushed in a more safe context. */
-	if (add && printk_safe_irq_ready) {
-		/* Make sure that IRQ work is really initialized. */
-		smp_rmb();
-		irq_work_queue(&s->work);
-	}
-
+	queue_flush_work(s);
 	return add;
 }
 
@@ -168,6 +177,14 @@ static int printk_safe_flush_buffer(const char *start, size_t len)
 	return len;
 }
 
+static void report_message_lost(struct printk_safe_seq_buf *s)
+{
+	int lost = atomic_xchg(&s->message_lost, 0);
+
+	if (lost)
+		printk_deferred("Lost %d message(s)!\n", lost);
+}
+
 /*
  * Flush data from the associated per-CPU buffer. The function
  * can be called either via IRQ work or independently.
@@ -225,6 +242,7 @@ static void __printk_safe_flush(struct irq_work *work)
 		goto more;
 
 out:
+	report_message_lost(s);
 	raw_spin_unlock_irqrestore(&read_lock, flags);
 }