// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Google, Inc.
 */

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/barrier.h>
#include "internal.h"

/* This doesn't need to be atomic: speed is chosen over correctness here. */
static u64 pstore_ftrace_stamp;

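/*
 * ftrace callback: emit one pstore_ftrace_record (ip, parent_ip, CPU and
 * an approximate sequence stamp) to the pstore backend for every traced
 * call.  Recursion is fended off with ftrace_test_recursion_trylock(),
 * and the backend write is done with local interrupts disabled.
 */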
static void notrace pstore_ftrace_call(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *op,
				       struct ftrace_regs *fregs)
{
	int bit;
	unsigned long flags;
	struct pstore_ftrace_record rec = {};
	struct pstore_record record = {
		.type = PSTORE_TYPE_FTRACE,
		.buf = (char *)&rec,
		.size = sizeof(rec),
		.psi = psinfo,
	};

	if (unlikely(oops_in_progress))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	local_irq_save(flags);

	rec.ip = ip;
	rec.parent_ip = parent_ip;
	pstore_ftrace_write_timestamp(&rec, pstore_ftrace_stamp++);
	pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
	psinfo->write(&record);

	local_irq_restore(flags);
	ftrace_test_recursion_unlock(bit);
}

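/*
 * The ops handed to ftrace while recording is enabled; it picks up the
 * current global function filter via ftrace_ops_set_global_filter() at
 * enable time.
 */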
static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
	.func = pstore_ftrace_call,
};

static DEFINE_MUTEX(pstore_ftrace_lock);
static bool pstore_ftrace_enabled;

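/*
 * debugfs knob: writing '1' registers the ftrace ops and starts recording,
 * writing '0' unregisters it again.  With debugfs mounted in the usual
 * place the knob shows up as /sys/kernel/debug/pstore/record_ftrace, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/pstore/record_ftrace
 */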
static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	u8 on;
	ssize_t ret;

	ret = kstrtou8_from_user(buf, count, 2, &on);
	if (ret)
		return ret;

	mutex_lock(&pstore_ftrace_lock);

	if (!on ^ pstore_ftrace_enabled)
		goto out;

	if (on) {
		ftrace_ops_set_global_filter(&pstore_ftrace_ops);
		ret = register_ftrace_function(&pstore_ftrace_ops);
	} else {
		ret = unregister_ftrace_function(&pstore_ftrace_ops);
	}

	if (ret) {
		pr_err("%s: unable to %sregister ftrace ops: %zd\n",
		       __func__, on ? "" : "un", ret);
		goto err;
	}

	pstore_ftrace_enabled = on;
out:
	ret = count;
err:
	mutex_unlock(&pstore_ftrace_lock);

	return ret;
}

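/* Read back the current state as "0\n" or "1\n". */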
static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
				       size_t count, loff_t *ppos)
{
	char val[] = { '0' + pstore_ftrace_enabled, '\n' };

	return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
}

static const struct file_operations pstore_knob_fops = {
	.open = simple_open,
	.read = pstore_ftrace_knob_read,
	.write = pstore_ftrace_knob_write,
};

static struct dentry *pstore_ftrace_dir;

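/*
 * Create the debugfs directory and knob.  Skipped when the backend has no
 * write method, since there would be nothing to record to.
 */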
void pstore_register_ftrace(void)
{
	if (!psinfo->write)
		return;

	pstore_ftrace_dir = debugfs_create_dir("pstore", NULL);

	debugfs_create_file("record_ftrace", 0600, pstore_ftrace_dir, NULL,
			    &pstore_knob_fops);
}

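/*
 * Tear-down path: stop recording if it is still enabled, then remove the
 * debugfs entries.
 */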
void pstore_unregister_ftrace(void)
{
	mutex_lock(&pstore_ftrace_lock);
	if (pstore_ftrace_enabled) {
		unregister_ftrace_function(&pstore_ftrace_ops);
		pstore_ftrace_enabled = false;
	}
	mutex_unlock(&pstore_ftrace_lock);

	debugfs_remove_recursive(pstore_ftrace_dir);
}

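/*
 * Merge two timestamp-ordered ftrace logs into one.  On success the old
 * *dest_log buffer is freed and replaced by a newly allocated merged copy,
 * with *dest_log_size updated to match; src_log is left untouched.
 * Returns 0, or -ENOMEM if the merged buffer cannot be allocated.
 */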
ssize_t pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size,
				  const char *src_log, size_t src_log_size)
{
	size_t dest_size, src_size, total, dest_off, src_off;
	size_t dest_idx = 0, src_idx = 0, merged_idx = 0;
	void *merged_buf;
	struct pstore_ftrace_record *drec, *srec, *mrec;
	size_t record_size = sizeof(struct pstore_ftrace_record);

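	/*
	 * Only whole records are merged; any partial record bytes at the
	 * head of either log are skipped.
	 */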
	dest_off = *dest_log_size % record_size;
	dest_size = *dest_log_size - dest_off;

	src_off = src_log_size % record_size;
	src_size = src_log_size - src_off;

	total = dest_size + src_size;
	merged_buf = kmalloc(total, GFP_KERNEL);
	if (!merged_buf)
		return -ENOMEM;

	drec = (struct pstore_ftrace_record *)(*dest_log + dest_off);
	srec = (struct pstore_ftrace_record *)(src_log + src_off);
	mrec = (struct pstore_ftrace_record *)(merged_buf);

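	/* Standard merge step: the record with the older timestamp goes first. */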
	while (dest_size > 0 && src_size > 0) {
		if (pstore_ftrace_read_timestamp(&drec[dest_idx]) <
		    pstore_ftrace_read_timestamp(&srec[src_idx])) {
			mrec[merged_idx++] = drec[dest_idx++];
			dest_size -= record_size;
		} else {
			mrec[merged_idx++] = srec[src_idx++];
			src_size -= record_size;
		}
	}

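	/* One log is exhausted; copy whatever the other one has left. */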
	while (dest_size > 0) {
		mrec[merged_idx++] = drec[dest_idx++];
		dest_size -= record_size;
	}

	while (src_size > 0) {
		mrec[merged_idx++] = srec[src_idx++];
		src_size -= record_size;
	}

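	/* Hand the merged buffer back to the caller in place of the old one. */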
	kfree(*dest_log);
	*dest_log = merged_buf;
	*dest_log_size = total;

	return 0;
}
EXPORT_SYMBOL_GPL(pstore_ftrace_combine_log);