// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them one after
 * another in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
                                        STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
                STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
        (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
        depot_stack_handle_t handle;
        struct {
                u32 slabindex : STACK_ALLOC_INDEX_BITS;
                u32 offset : STACK_ALLOC_OFFSET_BITS;
                u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
        };
};
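
/*
 * Worked example of the handle layout (illustrative; assumes a 4 KiB page
 * size, i.e. PAGE_SHIFT == 12, with the defaults above):
 *
 *   STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4 = 10
 *   STACK_ALLOC_INDEX_BITS  = 32 - 1 - 10 = 21
 *
 * A handle thus packs a 21-bit slab index, a 10-bit offset (counted in
 * 16-byte units within a 16 KiB slab) and a 1-bit valid flag into 32 bits.
 * Since the slab count is further capped at STACK_ALLOC_SLABS_CAP (8192),
 * the depot can hold at most 8192 * 16 KiB = 128 MiB of stack records in
 * this configuration.
 */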

struct stack_record {
        struct stack_record *next;      /* Link in the hashtable */
        u32 hash;                       /* Hash in the hashtable */
        u32 size;                       /* Number of frames in the stack */
        union handle_parts handle;
        unsigned long entries[];        /* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
        if (!*prealloc)
                return false;
        /*
         * This smp_load_acquire() pairs with smp_store_release() to
         * |next_slab_inited| below and in depot_alloc_stack().
         */
        if (smp_load_acquire(&next_slab_inited))
                return true;
        if (stack_slabs[depot_index] == NULL) {
                stack_slabs[depot_index] = *prealloc;
                *prealloc = NULL;
        } else {
                /* If this is the last depot slab, do not touch the next one. */
                if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
                        stack_slabs[depot_index + 1] = *prealloc;
                        *prealloc = NULL;
                }
                /*
                 * This smp_store_release pairs with smp_load_acquire() from
                 * |next_slab_inited| above and in stack_depot_save().
                 */
                smp_store_release(&next_slab_inited, 1);
        }
        return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
        struct stack_record *stack;
        size_t required_size = struct_size(stack, entries, size);

        required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

        if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
                if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
                        WARN_ONCE(1, "Stack depot reached limit capacity");
                        return NULL;
                }
                depot_index++;
                depot_offset = 0;
                /*
                 * smp_store_release() here pairs with smp_load_acquire() from
                 * |next_slab_inited| in stack_depot_save() and
                 * init_stack_slab().
                 */
                if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
                        smp_store_release(&next_slab_inited, 0);
        }
        init_stack_slab(prealloc);
        if (stack_slabs[depot_index] == NULL)
                return NULL;

        stack = stack_slabs[depot_index] + depot_offset;

        stack->hash = hash;
        stack->size = size;
        stack->handle.slabindex = depot_index;
        stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
        stack->handle.valid = 1;
        memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
        depot_offset += required_size;

        return stack;
}
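
/*
 * Sizing sketch (illustrative; assumes a 64-bit kernel and the usual struct
 * padding): for a 16-frame trace, struct_size(stack, entries, 16) is 24
 * bytes of header plus 16 * 8 bytes of entries = 152 bytes, which ALIGN()
 * rounds up to 160 bytes, i.e. ten 16-byte units of depot_offset.
 */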

#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
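
/*
 * Sizing note (illustrative; CONFIG_STACK_HASH_ORDER is assumed to be at its
 * default of 20): 1 << 20 buckets of one pointer each make the hash table
 * itself cost 8 MiB on a 64-bit kernel, which is why it can be disabled or
 * deferred via stack_depot_init() below.
 */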

static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
        int ret;

        ret = kstrtobool(str, &stack_depot_disable);
        if (!ret && stack_depot_disable) {
                pr_info("Stack Depot is disabled\n");
                stack_table = NULL;
        }
        return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);
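
/*
 * Usage note (illustrative): booting with "stack_depot_disable=on" on the
 * kernel command line sets stack_depot_disable above, so stack_depot_save()
 * returns 0 without storing anything and the hash table is never allocated.
 */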

/*
 * __ref because of memblock_alloc(), which will not actually be called after
 * the __init code is gone, because at that point slab_is_available() is true
 */
__ref int stack_depot_init(void)
{
        static DEFINE_MUTEX(stack_depot_init_mutex);

        mutex_lock(&stack_depot_init_mutex);
        if (!stack_depot_disable && !stack_table) {
                size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
                int i;

                if (slab_is_available()) {
                        pr_info("Stack Depot allocating hash table with kvmalloc\n");
                        stack_table = kvmalloc(size, GFP_KERNEL);
                } else {
                        pr_info("Stack Depot allocating hash table with memblock_alloc\n");
                        stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
                }
                if (stack_table) {
                        for (i = 0; i < STACK_HASH_SIZE; i++)
                                stack_table[i] = NULL;
                } else {
                        pr_err("Stack Depot hash table allocation failed, disabling\n");
                        stack_depot_disable = true;
                        mutex_unlock(&stack_depot_init_mutex);
                        return -ENOMEM;
                }
        }
        mutex_unlock(&stack_depot_init_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
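
/*
 * Usage sketch (illustrative; my_debug_init() is a hypothetical caller): a
 * debug feature that wants to use the depot calls stack_depot_init() once
 * from its own init path, e.g.:
 *
 *      static int __init my_debug_init(void)
 *      {
 *              if (stack_depot_init())
 *                      return -ENOMEM;
 *              ...
 *      }
 *
 * Repeated calls are cheap: the mutex together with the !stack_table check
 * makes the function idempotent.
 */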

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
        return jhash2((u32 *)entries,
                      array_size(size, sizeof(*entries)) / sizeof(u32),
                      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
                        unsigned int n)
{
        for ( ; n-- ; u1++, u2++) {
                if (*u1 != *u2)
                        return 1;
        }
        return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
                                             unsigned long *entries, int size,
                                             u32 hash)
{
        struct stack_record *found;

        for (found = bucket; found; found = found->next) {
                if (found->hash == hash &&
                    found->size == size &&
                    !stackdepot_memcmp(entries, found->entries, size))
                        return found;
        }
        return NULL;
}

/**
 * stack_depot_snprint - print stack entries from a depot into a buffer
 *
 * @handle:     Stack depot handle which was returned from
 *              stack_depot_save().
 * @buf:        Pointer to the print buffer
 * @size:       Size of the print buffer
 * @spaces:     Number of leading spaces to print
 *
 * Return:      Number of bytes printed.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
                       int spaces)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(handle, &entries);
        return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
                                                spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
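
/*
 * Usage sketch (illustrative; "handle" and the buffer size are hypothetical
 * caller state): report code that already holds a handle can render it into
 * a preallocated buffer, e.g.:
 *
 *      char buf[512];
 *
 *      stack_depot_snprint(handle, buf, sizeof(buf), 2);
 *      pr_info("%s", buf);
 *
 * A zero return means the handle was 0 or could not be resolved.
 */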

/**
 * stack_depot_print - print stack entries from a depot
 *
 * @stack:      Stack depot handle which was returned from
 *              stack_depot_save().
 */
void stack_depot_print(depot_stack_handle_t stack)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(stack, &entries);
        if (nr_entries > 0)
                stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:             Stack depot handle which was returned from
 *                      stack_depot_save().
 * @entries:            Pointer to store the entries address
 *
 * Return:              The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
                               unsigned long **entries)
{
        union handle_parts parts = { .handle = handle };
        void *slab;
        size_t offset = parts.offset << STACK_ALLOC_ALIGN;
        struct stack_record *stack;

        *entries = NULL;
        if (!handle)
                return 0;

        if (parts.slabindex > depot_index) {
                WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
                        parts.slabindex, depot_index, handle);
                return 0;
        }
        slab = stack_slabs[parts.slabindex];
        if (!slab)
                return 0;
        stack = slab + offset;

        *entries = stack->entries;
        return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
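
/*
 * Usage sketch (illustrative; "handle" is hypothetical caller state): callers
 * that want the raw frames rather than formatted output resolve the handle
 * and consume the entries in place; the memory belongs to the depot and must
 * not be modified or freed:
 *
 *      unsigned long *entries;
 *      unsigned int nr_entries;
 *
 *      nr_entries = stack_depot_fetch(handle, &entries);
 *      if (nr_entries)
 *              stack_trace_print(entries, nr_entries, 0);
 *
 * This is essentially what stack_depot_print() above does.
 */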

/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:            Pointer to storage array
 * @nr_entries:         Size of the storage array
 * @alloc_flags:        Allocation gfp flags
 * @can_alloc:          Allocate stack slabs (increased chance of failure if false)
 *
 * Saves a stack trace from the @entries array of size @nr_entries. If
 * @can_alloc is %true, the function is allowed to replenish the stack slab
 * pool in case no space is left (allocating with the GFP flags of
 * @alloc_flags). If @can_alloc is %false, it avoids any allocations and
 * fails if no space is left to store the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
                                        unsigned int nr_entries,
                                        gfp_t alloc_flags, bool can_alloc)
{
        struct stack_record *found = NULL, **bucket;
        depot_stack_handle_t retval = 0;
        struct page *page = NULL;
        void *prealloc = NULL;
        unsigned long flags;
        u32 hash;

        /*
         * If this stack trace is from an interrupt, including anything before
         * interrupt entry usually leads to unbounded stackdepot growth.
         *
         * Because use of filter_irq_stacks() is a requirement to ensure
         * stackdepot can efficiently deduplicate interrupt stacks, always
         * call filter_irq_stacks() here to simplify all callers' use of
         * stackdepot.
         */
        nr_entries = filter_irq_stacks(entries, nr_entries);

        if (unlikely(nr_entries == 0) || stack_depot_disable)
                goto fast_exit;

        hash = hash_stack(entries, nr_entries);
        bucket = &stack_table[hash & STACK_HASH_MASK];

        /*
         * Fast path: look the stack trace up without locking.
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |bucket| below.
         */
        found = find_stack(smp_load_acquire(bucket), entries,
                           nr_entries, hash);
        if (found)
                goto exit;

        /*
         * Check if the current or the next stack slab needs to be
         * initialized. If so, allocate the memory now - we won't be able to
         * do that under the lock.
         *
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
         */
        if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in atomic
                 * contexts and I/O.
                 */
                alloc_flags &= ~GFP_ZONEMASK;
                alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
                alloc_flags |= __GFP_NOWARN;
                page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
                if (page)
                        prealloc = page_address(page);
        }

        raw_spin_lock_irqsave(&depot_lock, flags);

        found = find_stack(*bucket, entries, nr_entries, hash);
        if (!found) {
                struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

                if (new) {
                        new->next = *bucket;
                        /*
                         * This smp_store_release() pairs with
                         * smp_load_acquire() from |bucket| above.
                         */
                        smp_store_release(bucket, new);
                        found = new;
                }
        } else if (prealloc) {
                /*
                 * We didn't need to store this stack trace, but let's keep
                 * the preallocated memory for the future.
                 */
                WARN_ON(!init_stack_slab(&prealloc));
        }

        raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
        if (prealloc) {
                /* Nobody used this memory, ok to free it. */
                free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
        }
        if (found)
                retval = found->handle.handle;
fast_exit:
        return retval;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:    Pointer to storage array
 * @nr_entries: Size of the storage array
 * @alloc_flags:        Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
                                      unsigned int nr_entries,
                                      gfp_t alloc_flags)
{
        return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
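
/*
 * End-to-end usage sketch (illustrative; the 64-entry array is an arbitrary
 * choice): the typical pattern is to capture the current stack with
 * stack_trace_save() and then deduplicate it through the depot, keeping only
 * the 32-bit handle:
 *
 *      unsigned long entries[64];
 *      unsigned int nr_entries;
 *      depot_stack_handle_t handle;
 *
 *      nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *      handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 *      if (!handle)
 *              ... (depot full or allocation failed)
 *
 * Later, the handle can be turned back into a printable trace with
 * stack_depot_fetch(), stack_depot_print() or stack_depot_snprint().
 */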