/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
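
/*
 * Typical usage, sketched for illustration only (the "foo" names below
 * are hypothetical; the CONFIG_DEBUG_OBJECTS_SELFTEST code at the end
 * of this file is a complete in-tree example):
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name	= "foo",
 *	};
 *
 *	void foo_setup(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_teardown(struct foo *f)
 *	{
 *		debug_object_deactivate(f, &foo_debug_descr);
 *		debug_object_free(f, &foo_debug_descr);
 *	}
 */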

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So we are freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
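
/*
 * For example, with HZ == 1000 the delay above evaluates to
 * DIV_ROUND_UP(1000, 10) == 100 jiffies, i.e. the worker runs at most
 * every ~100ms; 10 runs/sec * ODEBUG_FREE_WORK_MAX objects gives the
 * ~10k objects per second bound mentioned above.
 */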

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
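
/*
 * The two early_param() lines above register the "debug_objects" and
 * "no_debug_objects" kernel command line options, which override the
 * compile time default CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT.
 */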

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

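	/*
	 * The GFP flags above are conservative: fill_pool() may be
	 * reached via __debug_object_init() from contexts that cannot
	 * sleep, so the allocation must be atomic, and a failed refill
	 * is tolerated silently (__GFP_NORETRY | __GFP_NOWARN); the
	 * pool is simply topped up again on a later call.
	 */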
	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full or not; if not,
	 * refill the pool list from the global free list. As it is likely
	 * that a workload may be gearing up to use more and more objects,
	 * don't free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
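
/*
 * Example: with 4K pages (ODEBUG_CHUNK_SHIFT == 12), the addresses
 * 0x1000 and 0x1ff8 fall into the same chunk and thus hash to the same
 * bucket, while 0x2000 starts a new chunk and usually lands in a
 * different one. __debug_check_no_obj_freed() below relies on this to
 * scan only the buckets that can contain objects of a freed range.
 */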
475
476static void debug_print_object(struct debug_obj *obj, char *msg)
477{
Stanislaw Gruszka99777282011-03-07 09:58:33 +0100478 struct debug_obj_descr *descr = obj->descr;
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700479 static int limit;
480
Stanislaw Gruszka99777282011-03-07 09:58:33 +0100481 if (limit < 5 && descr != descr_test) {
482 void *hint = descr->debug_hint ?
483 descr->debug_hint(obj->object) : NULL;
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700484 limit++;
Mathieu Desnoyersa5d8e462010-04-17 08:48:38 -0400485 WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
Stanislaw Gruszka99777282011-03-07 09:58:33 +0100486 "object type: %s hint: %pS\n",
Mathieu Desnoyersa5d8e462010-04-17 08:48:38 -0400487 msg, obj_states[obj->state], obj->astate,
Stanislaw Gruszka99777282011-03-07 09:58:33 +0100488 descr->name, hint);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700489 }
490 debug_objects_warnings++;
491}
492
493/*
494 * Try to repair the damage, so we have a better chance to get useful
495 * debug output.
496 */
Du, Changbinb1e4d9d2016-05-19 17:09:20 -0700497static bool
498debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700499 void * addr, enum debug_obj_state state)
500{
Du, Changbinb1e4d9d2016-05-19 17:09:20 -0700501 if (fixup && fixup(addr, state)) {
502 debug_objects_fixups++;
503 return true;
504 }
505 return false;
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700506}
507
508static void debug_object_is_on_stack(void *addr, int onstack)
509{
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700510 int is_on_stack;
511 static int limit;
512
513 if (limit > 4)
514 return;
515
FUJITA Tomonori8b05c7e2008-07-23 21:26:53 -0700516 is_on_stack = object_is_on_stack(addr);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700517 if (is_on_stack == onstack)
518 return;
519
520 limit++;
521 if (is_on_stack)
Joel Fernandes (Google)fc91a3c2018-07-23 14:25:31 -0700522 pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
523 task_stack_page(current));
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700524 else
Joel Fernandes (Google)fc91a3c2018-07-23 14:25:31 -0700525 pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
526 task_stack_page(current));
527
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700528 WARN_ON(1);
529}
530
531static void
532__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
533{
534 enum debug_obj_state state;
Waiman Longd5f34152019-05-20 10:14:50 -0400535 bool check_stack = false;
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700536 struct debug_bucket *db;
537 struct debug_obj *obj;
538 unsigned long flags;
539
Vegard Nossum50db04dd2008-06-15 00:47:36 +0200540 fill_pool();
541
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700542 db = get_bucket((unsigned long) addr);
543
Thomas Gleixneraef9cb02009-11-17 18:11:28 +0100544 raw_spin_lock_irqsave(&db->lock, flags);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700545
546 obj = lookup_object(addr, db);
547 if (!obj) {
548 obj = alloc_object(addr, db, descr);
549 if (!obj) {
550 debug_objects_enabled = 0;
Thomas Gleixneraef9cb02009-11-17 18:11:28 +0100551 raw_spin_unlock_irqrestore(&db->lock, flags);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700552 debug_objects_oom();
553 return;
554 }
Waiman Longd5f34152019-05-20 10:14:50 -0400555 check_stack = true;
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700556 }
557
558 switch (obj->state) {
559 case ODEBUG_STATE_NONE:
560 case ODEBUG_STATE_INIT:
561 case ODEBUG_STATE_INACTIVE:
562 obj->state = ODEBUG_STATE_INIT;
563 break;
564
565 case ODEBUG_STATE_ACTIVE:
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700566 state = obj->state;
Thomas Gleixneraef9cb02009-11-17 18:11:28 +0100567 raw_spin_unlock_irqrestore(&db->lock, flags);
Waiman Longd5f34152019-05-20 10:14:50 -0400568 debug_print_object(obj, "init");
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700569 debug_object_fixup(descr->fixup_init, addr, state);
570 return;
571
572 case ODEBUG_STATE_DESTROYED:
Waiman Longd5f34152019-05-20 10:14:50 -0400573 raw_spin_unlock_irqrestore(&db->lock, flags);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700574 debug_print_object(obj, "init");
Waiman Longd5f34152019-05-20 10:14:50 -0400575 return;
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700576 default:
577 break;
578 }
579
Thomas Gleixneraef9cb02009-11-17 18:11:28 +0100580 raw_spin_unlock_irqrestore(&db->lock, flags);
Waiman Longd5f34152019-05-20 10:14:50 -0400581 if (check_stack)
582 debug_object_is_on_stack(addr, onstack);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700583}
584
585/**
586 * debug_object_init - debug checks when an object is initialized
587 * @addr: address of the object
588 * @descr: pointer to an object specific debug description structure
589 */
590void debug_object_init(void *addr, struct debug_obj_descr *descr)
591{
592 if (!debug_objects_enabled)
593 return;
594
595 __debug_object_init(addr, descr, 0);
596}
Chris Wilsonf8ff04e2016-11-30 15:54:10 -0800597EXPORT_SYMBOL_GPL(debug_object_init);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700598
599/**
600 * debug_object_init_on_stack - debug checks when an object on stack is
601 * initialized
602 * @addr: address of the object
603 * @descr: pointer to an object specific debug description structure
604 */
605void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
606{
607 if (!debug_objects_enabled)
608 return;
609
610 __debug_object_init(addr, descr, 1);
611}
Chris Wilsonf8ff04e2016-11-30 15:54:10 -0800612EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700613
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We let the type
	 * specific code confirm whether this is true or not. If true, we
	 * just make sure that the static object is tracked in the object
	 * tracker. If not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
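
/*
 * DEFINE_SHOW_ATTRIBUTE() generates the debug_stats_fops referenced
 * below. Reading /sys/kernel/debug/debug_objects/stats then produces
 * output of the following form (values illustrative only):
 *
 *	max_chain     :40
 *	max_checked   :571
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1628
 *	...
 *	objs_freed    :506
 */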

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is now called early, while only one CPU
	 * is up and interrupts have been disabled, so it is safe to replace
	 * the active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}