/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * later be presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously,
 * one after another, in contiguous memory allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */
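
/*
 * A minimal usage sketch (hypothetical caller-side code, not part of this
 * file; it assumes the generic stack_trace_save() helper from
 * <linux/stacktrace.h> available in v5.2+ kernels):
 *
 *	unsigned long entries[64];
 *	unsigned long *saved;
 *	unsigned int nr_entries, nr_saved;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 *
 * Identical traces produce identical handles, and the stored entries can be
 * retrieved later:
 *
 *	nr_saved = stack_depot_fetch(handle, &saved);
 */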

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
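
/*
 * A worked example of the bit budget above (an illustration only, assuming
 * 4 KiB pages, i.e. PAGE_SHIFT == 12, and a 32-bit depot_stack_handle_t):
 * each slab spans 1 << (12 + 2) == 16 KiB, so STACK_ALLOC_OFFSET_BITS ==
 * 12 + 2 - 4 == 10 bits address every 16-byte-aligned record within a slab,
 * leaving 32 - 1 - 10 == 21 bits for the slab index. Since 2^21 exceeds
 * STACK_ALLOC_SLABS_CAP, the depot is capped at 8192 slabs, i.e. at most
 * 8192 * 16 KiB == 128 MiB of stored stacks.
 */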

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[1];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
	} else {
		stack_slabs[depot_index + 1] = *prealloc;
		/*
		 * This smp_store_release() pairs with smp_load_acquire()
		 * from |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	*prealloc = NULL;
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	int required_size = offsetof(struct stack_record, entries) +
		sizeof(unsigned long) * size;
	struct stack_record *stack;

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire()
		 * from |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, size * sizeof(unsigned long));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

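/*
 * One bucket head per hash value: with STACK_HASH_ORDER == 20 the table
 * below holds 2^20 pointers, i.e. 8 MiB of static storage on a 64-bit
 * kernel. Collisions are chained through stack_record::next.
 */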
static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ... STACK_HASH_SIZE - 1] = NULL
};

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      size * sizeof(unsigned long) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab = stack_slabs[parts.slabindex];
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
{
	unsigned int nent = stack_depot_fetch(handle, &trace->entries);

	trace->max_entries = trace->nr_entries = nent;
}
EXPORT_SYMBOL_GPL(depot_fetch_stack);

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory - we won't be able to do
	 * that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in
		 * atomic contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);

/**
 * depot_save_stack - Save a stack in the stack depot
 * @trace:		the stacktrace to save
 * @alloc_flags:	flags for allocating additional memory if required
 */
depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
				      gfp_t alloc_flags)
{
	return stack_depot_save(trace->entries, trace->nr_entries, alloc_flags);
}
EXPORT_SYMBOL_GPL(depot_save_stack);