// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another
 * CPU. There are situations when we want to make sure that all
 * buffers have been handled, or when IRQs are blocked.
 */
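/*
 * Rough call flow (a sketch; the entry points live in printk.c,
 * <linux/hardirq.h> and internal.h rather than in this file):
 *
 *   printk() in NMI context
 *     -> vprintk_func()                  // sees PRINTK_NMI_CONTEXT_MASK
 *        -> vprintk_nmi()
 *           -> printk_safe_log_store()   // append to the per-CPU buffer
 *              -> queue_flush_work()     // kicks the irq_work
 *   ...later, from IRQ context (or printk_safe_flush() / panic())...
 *   __printk_safe_flush()
 *     -> printk_safe_flush_buffer() -> printk_deferred()
 */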
static int printk_safe_irq_ready __read_mostly;

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))
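/*
 * Sizing note: the subtractions above shrink the text buffer so that
 * the whole struct printk_safe_seq_buf below ends up close to
 * (1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) bytes per CPU (modulo
 * struct padding).
 */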

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Gets flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_safe_irq_ready)
		irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU context-dependent buffer. NMI and
 * printk-safe have dedicated buffers, because otherwise printk-safe
 * preempted by an NMI printk would overwrite the NMI messages.
 *
 * The messages are flushed from irq work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() will notice the buffer->len
 * mismatch and repeat the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted in len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data has been read before the buffer
	 * is reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data was written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}
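/*
 * Ordering sketch for the lockless protocol above: the implicit barrier
 * in the writer's atomic_cmpxchg() (data written before s->len moves)
 * pairs with the smp_rmb() in __printk_safe_flush(), and the smp_rmb()
 * in the len == 0 path above pairs with the atomic_cmpxchg() in
 * __printk_safe_flush() that truncates the buffer after reading it.
 */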

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage. Consoles will
	 * get explicitly called later, when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* printk the part of the temporary buffer, line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing new line. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore a pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out;	/* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data was copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu message buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}
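/*
 * A typical user is the all-CPU backtrace code: at the time of writing,
 * nmi_trigger_cpumask_backtrace() in lib/nmi_backtrace.c calls
 * printk_safe_flush() once all backtrace handlers have finished, so the
 * NMI output reaches the main ring buffer before the trigger returns.
 */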

/**
 * printk_safe_flush_on_panic - flush all per-cpu message buffers when the
 *	system goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context
 * when the system goes down. It makes a best effort to get the messages
 * into the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}
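/*
 * panic() is the expected caller: it invokes printk_safe_flush_on_panic()
 * before flushing the consoles, so that messages still sitting in the
 * per-CPU buffers make it into the main ring buffer and out to the
 * console.
 */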

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void notrace printk_nmi_enter(void)
{
	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void notrace printk_nmi_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}
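/*
 * These hooks are called from nmi_enter()/nmi_exit() (see
 * <linux/hardirq.h>). They are marked notrace because they run at the
 * very edges of NMI entry/exit, where recursing into the function
 * tracer is not safe.
 */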

/*
 * Marks code that might produce many messages in NMI context
 * and where the risk of losing them is more critical than possible
 * reordering.
 *
 * It has an effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}
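/*
 * The motivating user of the direct mode is ftrace_dump(): it brackets
 * its potentially huge output with printk_nmi_direct_enter()/exit() so
 * that messages go straight into the main log buffer whenever
 * logbuf_lock is available, instead of being squeezed through the
 * fixed-size per-CPU buffer.
 */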

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just
 * like the NMI variant.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}
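
/*
 * Callers do not use the two helpers above directly; internal.h wraps
 * them together with IRQ disabling, roughly like this:
 *
 *   #define printk_safe_enter_irqsave(flags)	\
 *	do {					\
 *		local_irq_save(flags);		\
 *		__printk_safe_enter();		\
 *	} while (0)
 *
 * printk.c takes logbuf_lock only inside such a section, which is what
 * lets vprintk_func() below divert a recursive printk() into the
 * per-CPU buffer instead of deadlocking on the lock.
 */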
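/*
 * Dispatch in decreasing order of preference: direct access to the main
 * log buffer from NMI (when the lock happens to be free), then the NMI
 * per-CPU buffer, then the printk-safe per-CPU buffer, and finally the
 * regular vprintk_default().
 */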
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use the extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use the extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/*
	 * In the highly unlikely event that an NMI were to trigger at
	 * this moment, make sure the IRQ work is set up before this
	 * variable is set.
	 */
	barrier();
	printk_safe_irq_ready = 1;

	/* Flush pending messages for which no IRQ work could be scheduled yet. */
	printk_safe_flush();
}