// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->inuse		-> Number of objects in use
 *	C. page->objects	-> Number of objects in page
 *	D. page->frozen		-> frozen state
 *
 * If a slab is frozen then it is exempt from list management. It is not
 * on any list except the per cpu partial list. The processor that froze the
 * slab is the one who can perform list operations on the page. Other
 * processors may put objects onto the freelist but the processor that
 * froze the slab is the only one that can retrieve the objects from the
 * page's freelist.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists, nor may the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * page->frozen		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif		/* CONFIG_SLUB_DEBUG */

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

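/*
 * Per-cpu partial lists are used only when CONFIG_SLUB_CPU_PARTIAL is
 * enabled and the cache has none of the SLAB_DEBUG_FLAGS set.
 */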
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slub() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}

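/*
 * A note on the obfuscation above (illustrative only): since XOR is its own
 * inverse, applying freelist_ptr() twice with the same ptr_addr returns the
 * original pointer, i.e. freelist_ptr(s, freelist_ptr(s, p, a), a) == p.
 * That is why the same helper is used both when a freelist entry is stored
 * (set_freepointer()) and when it is read back (freelist_dereference()).
 */
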
/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	object = kasan_reset_tag(object);
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
		unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
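
/*
 * Worked example (illustrative only, assuming 4K pages): oo_make(1, 256)
 * stores (1 << OO_SHIFT) + 8192 / 256 = (1 << 16) + 32 in x.x, so
 * oo_order() returns 1 and oo_objects() returns 32.
 */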

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

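/*
 * Variant of __cmpxchg_double_slab() that may be called with interrupts
 * enabled: in the slow (slab_lock) path below it disables and restores
 * interrupts around the lock itself.
 */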
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

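/*
 * Fill obj_map with the freelist of the given page: a set bit marks an
 * object that is currently free (on the freelist), so a cleared bit
 * corresponds to an object in use.
 */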
static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
		       struct page *page)
{
	void *addr = page_address(page);
	void *p;

	bitmap_zero(obj_map, page->objects);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), obj_map);
}

#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (likely(!current->kunit_test))
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif

/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
	__acquires(&object_map_lock)
{
	VM_BUG_ON(!irqs_disabled());

	spin_lock(&object_map_lock);

	__fill_map(object_map, s, page);

	return object_map;
}

static void put_map(unsigned long *map) __releases(&object_map_lock)
{
	VM_BUG_ON(map != object_map);
	spin_unlock(&object_map_lock);
}

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata.  This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
	metadata_access_disable();
}

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}
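
/*
 * For example (illustrative): with s->inuse == 64 and the free pointer
 * stored outside the object (s->offset >= s->inuse), the info block ends
 * at 64 + sizeof(void *); otherwise it ends at s->inuse itself.
 */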

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		unsigned int nr_entries;

		metadata_access_enable();
		nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
					      TRACK_ADDRS_COUNT, 3);
		metadata_access_disable();

		if (nr_entries < TRACK_ADDRS_COUNT)
			p->addrs[nr_entries] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else {
		memset(p, 0, sizeof(struct track));
	}
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_page_info(struct page *page)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n",
	       page, page->objects, page->inuse, page->freelist,
	       page->flags, &page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, page, nextfree) && freelist) {
		object_err(s, page, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR,         "Object   ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
			s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding  ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = page_address(page);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	slab_bug(s, "%s overwritten", what);
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);
	print_trailer(s, page, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 * Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

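/*
 * A rough sketch of the layout described above, assuming (for illustration
 * only) a cache with SLAB_RED_ZONE and SLAB_STORE_USER set and the free
 * pointer stored outside the object:
 *
 *	object - s->red_left_pad	left redzone
 *	object				s->object_size payload bytes
 *	object + s->object_size		padding / right redzone up to s->inuse
 *	object + s->inuse		free pointer, then 2 x struct track
 *	...				kasan metadata and padding filling the
 *					rest of the s->size sized slot
 */
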
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = page_size(page);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "End Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->slab_list);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}
1161
1162/* Object debug checks for alloc/free paths */
Christoph Lameter3ec09742007-05-16 22:11:00 -07001163static void setup_object_debug(struct kmem_cache *s, struct page *page,
1164 void *object)
1165{
Vlastimil Babka8fc8d662020-08-06 23:18:58 -07001166 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
Christoph Lameter3ec09742007-05-16 22:11:00 -07001167 return;
1168
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001169 init_object(s, object, SLUB_RED_INACTIVE);
Christoph Lameter3ec09742007-05-16 22:11:00 -07001170 init_tracking(s, object);
1171}
1172
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07001173static
1174void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
Andrey Konovalova7101222019-02-20 22:19:23 -08001175{
Vlastimil Babka8fc8d662020-08-06 23:18:58 -07001176 if (!kmem_cache_debug_flags(s, SLAB_POISON))
Andrey Konovalova7101222019-02-20 22:19:23 -08001177 return;
1178
1179 metadata_access_enable();
Andrey Konovalovaa1ef4d2020-12-22 12:02:17 -08001180 memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
Andrey Konovalova7101222019-02-20 22:19:23 -08001181 metadata_access_disable();
1182}
1183
Laura Abbottbecfda62016-03-15 14:55:06 -07001184static inline int alloc_consistency_checks(struct kmem_cache *s,
Qian Cai278d7752019-03-05 15:42:10 -08001185 struct page *page, void *object)
Christoph Lameter81819f02007-05-06 14:49:36 -07001186{
1187 if (!check_slab(s, page))
Laura Abbottbecfda62016-03-15 14:55:06 -07001188 return 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07001189
Christoph Lameter81819f02007-05-06 14:49:36 -07001190 if (!check_valid_pointer(s, page, object)) {
1191 object_err(s, page, object, "Freelist Pointer check fails");
Laura Abbottbecfda62016-03-15 14:55:06 -07001192 return 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07001193 }
1194
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001195 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
Laura Abbottbecfda62016-03-15 14:55:06 -07001196 return 0;
1197
1198 return 1;
1199}
1200
1201static noinline int alloc_debug_processing(struct kmem_cache *s,
1202 struct page *page,
1203 void *object, unsigned long addr)
1204{
1205 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
Qian Cai278d7752019-03-05 15:42:10 -08001206 if (!alloc_consistency_checks(s, page, object))
Laura Abbottbecfda62016-03-15 14:55:06 -07001207 goto bad;
1208 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001209
Christoph Lameter3ec09742007-05-16 22:11:00 -07001210	/* Success. Perform special debug activities for allocs. */
1211 if (s->flags & SLAB_STORE_USER)
1212 set_track(s, object, TRACK_ALLOC, addr);
1213 trace(s, page, object, 1);
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001214 init_object(s, object, SLUB_RED_ACTIVE);
Christoph Lameter81819f02007-05-06 14:49:36 -07001215 return 1;
Christoph Lameter3ec09742007-05-16 22:11:00 -07001216
Christoph Lameter81819f02007-05-06 14:49:36 -07001217bad:
1218 if (PageSlab(page)) {
1219 /*
 1220		 * If this is a slab page then let's do the best we can
1221 * to avoid issues in the future. Marking all objects
Christoph Lameter672bba32007-05-09 02:32:39 -07001222 * as used avoids touching the remaining objects.
Christoph Lameter81819f02007-05-06 14:49:36 -07001223 */
Christoph Lameter24922682007-07-17 04:03:18 -07001224 slab_fix(s, "Marking all objects used");
Christoph Lameter39b26462008-04-14 19:11:30 +03001225 page->inuse = page->objects;
Christoph Lametera973e9d2008-03-01 13:40:44 -08001226 page->freelist = NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07001227 }
1228 return 0;
1229}
1230
Laura Abbottbecfda62016-03-15 14:55:06 -07001231static inline int free_consistency_checks(struct kmem_cache *s,
1232 struct page *page, void *object, unsigned long addr)
1233{
1234 if (!check_valid_pointer(s, page, object)) {
1235 slab_err(s, page, "Invalid object pointer 0x%p", object);
1236 return 0;
1237 }
1238
1239 if (on_freelist(s, page, object)) {
1240 object_err(s, page, object, "Object already free");
1241 return 0;
1242 }
1243
1244 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1245 return 0;
1246
1247 if (unlikely(s != page->slab_cache)) {
1248 if (!PageSlab(page)) {
Joe Perches756a0252016-03-17 14:19:47 -07001249 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1250 object);
Laura Abbottbecfda62016-03-15 14:55:06 -07001251 } else if (!page->slab_cache) {
1252 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1253 object);
1254 dump_stack();
1255 } else
1256 object_err(s, page, object,
1257 "page slab pointer corrupt.");
1258 return 0;
1259 }
1260 return 1;
1261}
1262
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001263/* Supports checking bulk free of a constructed freelist */
Laura Abbott282acb42016-03-15 14:54:59 -07001264static noinline int free_debug_processing(
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001265 struct kmem_cache *s, struct page *page,
1266 void *head, void *tail, int bulk_cnt,
Laura Abbott282acb42016-03-15 14:54:59 -07001267 unsigned long addr)
Christoph Lameter81819f02007-05-06 14:49:36 -07001268{
Christoph Lameter19c7ff92012-05-30 12:54:46 -05001269 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001270 void *object = head;
1271 int cnt = 0;
Kees Cook3f649ab2020-06-03 13:09:38 -07001272 unsigned long flags;
Laura Abbott804aa132016-03-15 14:55:02 -07001273 int ret = 0;
Christoph Lameter5c2e4bb2011-06-01 12:25:54 -05001274
Laura Abbott282acb42016-03-15 14:54:59 -07001275 spin_lock_irqsave(&n->list_lock, flags);
Christoph Lameter881db7f2011-06-01 12:25:53 -05001276 slab_lock(page);
1277
Laura Abbottbecfda62016-03-15 14:55:06 -07001278 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1279 if (!check_slab(s, page))
1280 goto out;
1281 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001282
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001283next_object:
1284 cnt++;
1285
Laura Abbottbecfda62016-03-15 14:55:06 -07001286 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1287 if (!free_consistency_checks(s, page, object, addr))
1288 goto out;
Christoph Lameter81819f02007-05-06 14:49:36 -07001289 }
Christoph Lameter3ec09742007-05-16 22:11:00 -07001290
Christoph Lameter3ec09742007-05-16 22:11:00 -07001291 if (s->flags & SLAB_STORE_USER)
1292 set_track(s, object, TRACK_FREE, addr);
1293 trace(s, page, object, 0);
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001294 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001295 init_object(s, object, SLUB_RED_INACTIVE);
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001296
1297 /* Reached end of constructed freelist yet? */
1298 if (object != tail) {
1299 object = get_freepointer(s, object);
1300 goto next_object;
1301 }
Laura Abbott804aa132016-03-15 14:55:02 -07001302 ret = 1;
1303
Christoph Lameter5c2e4bb2011-06-01 12:25:54 -05001304out:
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001305 if (cnt != bulk_cnt)
1306 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1307 bulk_cnt, cnt);
1308
Christoph Lameter881db7f2011-06-01 12:25:53 -05001309 slab_unlock(page);
Laura Abbott282acb42016-03-15 14:54:59 -07001310 spin_unlock_irqrestore(&n->list_lock, flags);
Laura Abbott804aa132016-03-15 14:55:02 -07001311 if (!ret)
1312 slab_fix(s, "Object at 0x%p not freed", object);
1313 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07001314}
1315
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001316/*
1317 * Parse a block of slub_debug options. Blocks are delimited by ';'
1318 *
1319 * @str: start of block
1320 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1321 * @slabs: return start of list of slabs, or NULL when there's no list
1322 * @init: assume this is initial parsing and not per-kmem-create parsing
1323 *
 1324 * returns the start of the next block if there's any, or NULL
1325 */
1326static char *
1327parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1328{
1329 bool higher_order_disable = false;
1330
1331 /* Skip any completely empty blocks */
1332 while (*str && *str == ';')
1333 str++;
1334
1335 if (*str == ',') {
1336 /*
1337 * No options but restriction on slabs. This means full
1338 * debugging for slabs matching a pattern.
1339 */
1340 *flags = DEBUG_DEFAULT_FLAGS;
1341 goto check_slabs;
1342 }
1343 *flags = 0;
1344
1345 /* Determine which debug features should be switched on */
1346 for (; *str && *str != ',' && *str != ';'; str++) {
1347 switch (tolower(*str)) {
1348 case '-':
1349 *flags = 0;
1350 break;
1351 case 'f':
1352 *flags |= SLAB_CONSISTENCY_CHECKS;
1353 break;
1354 case 'z':
1355 *flags |= SLAB_RED_ZONE;
1356 break;
1357 case 'p':
1358 *flags |= SLAB_POISON;
1359 break;
1360 case 'u':
1361 *flags |= SLAB_STORE_USER;
1362 break;
1363 case 't':
1364 *flags |= SLAB_TRACE;
1365 break;
1366 case 'a':
1367 *flags |= SLAB_FAILSLAB;
1368 break;
1369 case 'o':
1370 /*
1371 * Avoid enabling debugging on caches if its minimum
1372 * order would increase as a result.
1373 */
1374 higher_order_disable = true;
1375 break;
1376 default:
1377 if (init)
1378 pr_err("slub_debug option '%c' unknown. skipped\n", *str);
1379 }
1380 }
1381check_slabs:
1382 if (*str == ',')
1383 *slabs = ++str;
1384 else
1385 *slabs = NULL;
1386
1387 /* Skip over the slab list */
1388 while (*str && *str != ';')
1389 str++;
1390
1391 /* Skip any completely empty blocks */
1392 while (*str && *str == ';')
1393 str++;
1394
1395 if (init && higher_order_disable)
1396 disable_higher_order_debug = 1;
1397
1398 if (*str)
1399 return str;
1400 else
1401 return NULL;
1402}
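/*
 * Editor's note (illustrative examples only; the cache names are arbitrary):
 * the ';'-delimited block syntax parsed above allows boot parameters such as
 *
 *	slub_debug=FZ                sanity checks + red zoning for all caches
 *	slub_debug=,dentry           full debugging, but only for the dentry cache
 *	slub_debug=FZ;-,kmalloc-*    F and Z globally, while caches matching the
 *	                             kmalloc-* glob get debugging switched off
 *
 * Each block is handled by one call to parse_slub_debug_flags(), either at
 * boot (init == true) or later when an individual cache is created.
 */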
1403
Christoph Lameter41ecc552007-05-09 02:32:44 -07001404static int __init setup_slub_debug(char *str)
1405{
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001406 slab_flags_t flags;
Vlastimil Babkaa7f1d482021-08-13 16:54:34 -07001407 slab_flags_t global_flags;
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001408 char *saved_str;
1409 char *slab_list;
1410 bool global_slub_debug_changed = false;
1411 bool slab_list_specified = false;
1412
Vlastimil Babkaa7f1d482021-08-13 16:54:34 -07001413 global_flags = DEBUG_DEFAULT_FLAGS;
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001414 if (*str++ != '=' || !*str)
1415 /*
1416 * No options specified. Switch on full debugging.
1417 */
1418 goto out;
Christoph Lameter41ecc552007-05-09 02:32:44 -07001419
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001420 saved_str = str;
1421 while (str) {
1422 str = parse_slub_debug_flags(str, &flags, &slab_list, true);
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001423
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001424 if (!slab_list) {
Vlastimil Babkaa7f1d482021-08-13 16:54:34 -07001425 global_flags = flags;
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001426 global_slub_debug_changed = true;
1427 } else {
1428 slab_list_specified = true;
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001429 }
1430 }
1431
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001432 /*
 1433	 * For backwards compatibility, a single list of flags with a list of
Vlastimil Babkaa7f1d482021-08-13 16:54:34 -07001434	 * slabs means debugging is only changed for those slabs, so the global
 1435	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
 1436	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001437 * long as there is no option specifying flags without a slab list.
1438 */
1439 if (slab_list_specified) {
1440 if (!global_slub_debug_changed)
Vlastimil Babkaa7f1d482021-08-13 16:54:34 -07001441 global_flags = slub_debug;
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001442 slub_debug_string = saved_str;
1443 }
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001444out:
Vlastimil Babkaa7f1d482021-08-13 16:54:34 -07001445 slub_debug = global_flags;
Vlastimil Babkaca0cab62020-08-06 23:18:51 -07001446 if (slub_debug != 0 || slub_debug_string)
1447 static_branch_enable(&slub_debug_enabled);
Stephen Boyd02ac47d2021-06-28 19:34:43 -07001448 else
1449 static_branch_disable(&slub_debug_enabled);
Alexander Potapenko64713842019-07-11 20:59:19 -07001450 if ((static_branch_unlikely(&init_on_alloc) ||
1451 static_branch_unlikely(&init_on_free)) &&
1452 (slub_debug & SLAB_POISON))
1453 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
Christoph Lameter41ecc552007-05-09 02:32:44 -07001454 return 1;
1455}
1456
1457__setup("slub_debug", setup_slub_debug);
1458
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001459/*
1460 * kmem_cache_flags - apply debugging options to the cache
1461 * @object_size: the size of an object without meta data
1462 * @flags: flags to set
1463 * @name: name of the cache
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001464 *
1465 * Debug option(s) are applied to @flags. In addition to the debug
1466 * option(s), if a slab name (or multiple) is specified i.e.
1467 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 1468 * then only the selected slabs will receive the debug option(s).
1469 */
Alexey Dobriyan0293d1f2018-04-05 16:21:24 -07001470slab_flags_t kmem_cache_flags(unsigned int object_size,
Nikolay Borisov37540002021-02-24 12:00:58 -08001471 slab_flags_t flags, const char *name)
Christoph Lameter41ecc552007-05-09 02:32:44 -07001472{
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001473 char *iter;
1474 size_t len;
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001475 char *next_block;
1476 slab_flags_t block_flags;
Johannes Bergca220592021-02-24 12:01:04 -08001477 slab_flags_t slub_debug_local = slub_debug;
1478
1479 /*
1480 * If the slab cache is for debugging (e.g. kmemleak) then
1481 * don't store user (stack trace) information by default,
1482 * but let the user enable it via the command line below.
1483 */
1484 if (flags & SLAB_NOLEAKTRACE)
1485 slub_debug_local &= ~SLAB_STORE_USER;
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001486
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001487 len = strlen(name);
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001488 next_block = slub_debug_string;
1489 /* Go through all blocks of debug options, see if any matches our slab's name */
1490 while (next_block) {
1491 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1492 if (!iter)
1493 continue;
1494 /* Found a block that has a slab list, search it */
1495 while (*iter) {
1496 char *end, *glob;
1497 size_t cmplen;
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001498
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001499 end = strchrnul(iter, ',');
1500 if (next_block && next_block < end)
1501 end = next_block - 1;
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001502
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001503 glob = strnchr(iter, end - iter, '*');
1504 if (glob)
1505 cmplen = glob - iter;
1506 else
1507 cmplen = max_t(size_t, len, (end - iter));
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001508
Vlastimil Babkae17f1df2020-08-06 23:18:35 -07001509 if (!strncmp(name, iter, cmplen)) {
1510 flags |= block_flags;
1511 return flags;
1512 }
1513
1514 if (!*end || *end == ';')
1515 break;
1516 iter = end + 1;
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001517 }
Aaron Tomlinc5fd3ca2018-10-26 15:03:15 -07001518 }
Christoph Lameterba0268a2007-09-11 15:24:11 -07001519
Johannes Bergca220592021-02-24 12:01:04 -08001520 return flags | slub_debug_local;
Christoph Lameter41ecc552007-05-09 02:32:44 -07001521}
Jesper Dangaard Brouerb4a64712015-11-20 15:57:41 -08001522#else /* !CONFIG_SLUB_DEBUG */
Christoph Lameter3ec09742007-05-16 22:11:00 -07001523static inline void setup_object_debug(struct kmem_cache *s,
1524 struct page *page, void *object) {}
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07001525static inline
1526void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
Christoph Lameter41ecc552007-05-09 02:32:44 -07001527
Christoph Lameter3ec09742007-05-16 22:11:00 -07001528static inline int alloc_debug_processing(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001529 struct page *page, void *object, unsigned long addr) { return 0; }
Christoph Lameter41ecc552007-05-09 02:32:44 -07001530
Laura Abbott282acb42016-03-15 14:54:59 -07001531static inline int free_debug_processing(
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001532 struct kmem_cache *s, struct page *page,
1533 void *head, void *tail, int bulk_cnt,
Laura Abbott282acb42016-03-15 14:54:59 -07001534 unsigned long addr) { return 0; }
Christoph Lameter41ecc552007-05-09 02:32:44 -07001535
Christoph Lameter41ecc552007-05-09 02:32:44 -07001536static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1537 { return 1; }
1538static inline int check_object(struct kmem_cache *s, struct page *page,
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001539 void *object, u8 val) { return 1; }
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001540static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1541 struct page *page) {}
Peter Zijlstrac65c1872014-01-10 13:23:49 +01001542static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1543 struct page *page) {}
Alexey Dobriyan0293d1f2018-04-05 16:21:24 -07001544slab_flags_t kmem_cache_flags(unsigned int object_size,
Nikolay Borisov37540002021-02-24 12:00:58 -08001545 slab_flags_t flags, const char *name)
Christoph Lameterba0268a2007-09-11 15:24:11 -07001546{
1547 return flags;
1548}
Christoph Lameter41ecc552007-05-09 02:32:44 -07001549#define slub_debug 0
Christoph Lameter0f389ec2008-04-14 18:53:02 +03001550
Ingo Molnarfdaa45e2009-09-15 11:00:26 +02001551#define disable_higher_order_debug 0
1552
Christoph Lameter0f389ec2008-04-14 18:53:02 +03001553static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1554 { return 0; }
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04001555static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1556 { return 0; }
Christoph Lameter205ab992008-04-14 19:11:40 +03001557static inline void inc_slabs_node(struct kmem_cache *s, int node,
1558 int objects) {}
1559static inline void dec_slabs_node(struct kmem_cache *s, int node,
1560 int objects) {}
Christoph Lameter7d550c52010-08-25 14:07:16 -05001561
Dongli Zhang52f23472020-06-01 21:45:47 -07001562static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
Eugeniu Roscadc07a722020-09-04 16:35:30 -07001563 void **freelist, void *nextfree)
Dongli Zhang52f23472020-06-01 21:45:47 -07001564{
1565 return false;
1566}
Andrey Ryabinin02e72cc2014-08-06 16:04:18 -07001567#endif /* CONFIG_SLUB_DEBUG */
1568
1569/*
1570 * Hooks for other subsystems that check memory allocations. In a typical
 1571 * production configuration all of these hooks should produce no code at all.
1572 */
Andrey Konovalov01165232018-12-28 00:29:37 -08001573static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
Roman Bobnievd56791b2013-10-08 15:58:57 -07001574{
Andrey Konovalov53128242019-02-20 22:19:11 -08001575 ptr = kasan_kmalloc_large(ptr, size, flags);
Andrey Konovalova2f77572019-02-20 22:19:16 -08001576 /* As ptr might get tagged, call kmemleak hook after KASAN. */
Roman Bobnievd56791b2013-10-08 15:58:57 -07001577 kmemleak_alloc(ptr, size, 1, flags);
Andrey Konovalov53128242019-02-20 22:19:11 -08001578 return ptr;
Roman Bobnievd56791b2013-10-08 15:58:57 -07001579}
1580
Dmitry Vyukovee3ce772018-02-06 15:36:27 -08001581static __always_inline void kfree_hook(void *x)
Roman Bobnievd56791b2013-10-08 15:58:57 -07001582{
1583 kmemleak_free(x);
Andrey Konovalov027b37b2021-02-24 12:05:46 -08001584 kasan_kfree_large(x);
Roman Bobnievd56791b2013-10-08 15:58:57 -07001585}
1586
Andrey Konovalovd57a9642021-04-29 23:00:09 -07001587static __always_inline bool slab_free_hook(struct kmem_cache *s,
1588 void *x, bool init)
Roman Bobnievd56791b2013-10-08 15:58:57 -07001589{
1590 kmemleak_free_recursive(x, s->flags);
Christoph Lameter7d550c52010-08-25 14:07:16 -05001591
Vlastimil Babka84048032021-05-21 01:25:06 +02001592 debug_check_no_locks_freed(x, s->object_size);
Andrey Ryabinin02e72cc2014-08-06 16:04:18 -07001593
Andrey Ryabinin02e72cc2014-08-06 16:04:18 -07001594 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1595 debug_check_no_obj_freed(x, s->object_size);
Andrey Ryabinin0316bec2015-02-13 14:39:42 -08001596
Marco Elvercfbe1632020-08-06 23:19:12 -07001597 /* Use KCSAN to help debug racy use-after-free. */
1598 if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
1599 __kcsan_check_access(x, s->object_size,
1600 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
1601
Andrey Konovalovd57a9642021-04-29 23:00:09 -07001602 /*
1603 * As memory initialization might be integrated into KASAN,
1604 * kasan_slab_free and initialization memset's must be
1605 * kept together to avoid discrepancies in behavior.
1606 *
1607 * The initialization memset's clear the object and the metadata,
1608 * but don't touch the SLAB redzone.
1609 */
1610 if (init) {
1611 int rsize;
1612
1613 if (!kasan_has_integrated_init())
1614 memset(kasan_reset_tag(x), 0, s->object_size);
1615 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
1616 memset((char *)kasan_reset_tag(x) + s->inuse, 0,
1617 s->size - s->inuse - rsize);
1618 }
1619 /* KASAN might put x into memory quarantine, delaying its reuse. */
1620 return kasan_slab_free(s, x, init);
Andrey Ryabinin02e72cc2014-08-06 16:04:18 -07001621}
Christoph Lameter205ab992008-04-14 19:11:40 +03001622
Andrey Konovalovc3895392018-04-10 16:30:31 -07001623static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1624 void **head, void **tail)
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001625{
Alexander Potapenko64713842019-07-11 20:59:19 -07001626
1627 void *object;
1628 void *next = *head;
1629 void *old_tail = *tail ? *tail : *head;
Alexander Potapenko64713842019-07-11 20:59:19 -07001630
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08001631 if (is_kfence_address(next)) {
Andrey Konovalovd57a9642021-04-29 23:00:09 -07001632 slab_free_hook(s, next, false);
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08001633 return true;
1634 }
1635
Laura Abbottaea4df42019-11-15 17:34:50 -08001636 /* Head and tail of the reconstructed freelist */
1637 *head = NULL;
1638 *tail = NULL;
Laura Abbott1b7e8162019-07-31 15:32:40 -04001639
Laura Abbottaea4df42019-11-15 17:34:50 -08001640 do {
1641 object = next;
1642 next = get_freepointer(s, object);
1643
Andrey Konovalovc3895392018-04-10 16:30:31 -07001644 /* If object's reuse doesn't have to be delayed */
Andrey Konovalovd57a9642021-04-29 23:00:09 -07001645 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
Andrey Konovalovc3895392018-04-10 16:30:31 -07001646 /* Move object to the new freelist */
1647 set_freepointer(s, object, *head);
1648 *head = object;
1649 if (!*tail)
1650 *tail = object;
1651 }
1652 } while (object != old_tail);
1653
1654 if (*head == *tail)
1655 *tail = NULL;
1656
1657 return *head != NULL;
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08001658}
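/*
 * Editor's note (worked example with hypothetical object names): freeing a
 * constructed chain obj1 -> obj2 -> obj3 where slab_free_hook() decides that
 * obj2's reuse must be delayed (e.g. KASAN quarantine) rebuilds the list as
 * obj3 -> obj1, with *head == obj3 and *tail == obj1, and returns true.  If
 * every object is delayed, *head stays NULL, false is returned and the caller
 * skips the actual release of the objects into the slab.
 */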
1659
Andrey Konovalov4d176712018-12-28 00:30:23 -08001660static void *setup_object(struct kmem_cache *s, struct page *page,
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001661 void *object)
1662{
1663 setup_object_debug(s, page, object);
Andrey Konovalov4d176712018-12-28 00:30:23 -08001664 object = kasan_init_slab_obj(s, object);
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001665 if (unlikely(s->ctor)) {
1666 kasan_unpoison_object_data(s, object);
1667 s->ctor(object);
1668 kasan_poison_object_data(s, object);
1669 }
Andrey Konovalov4d176712018-12-28 00:30:23 -08001670 return object;
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001671}
1672
Christoph Lameter81819f02007-05-06 14:49:36 -07001673/*
1674 * Slab allocation and freeing
1675 */
Vladimir Davydov5dfb4172014-06-04 16:06:38 -07001676static inline struct page *alloc_slab_page(struct kmem_cache *s,
1677 gfp_t flags, int node, struct kmem_cache_order_objects oo)
Christoph Lameter65c33762008-04-14 19:11:40 +03001678{
Vladimir Davydov5dfb4172014-06-04 16:06:38 -07001679 struct page *page;
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07001680 unsigned int order = oo_order(oo);
Christoph Lameter65c33762008-04-14 19:11:40 +03001681
Christoph Lameter2154a332010-07-09 14:07:10 -05001682 if (node == NUMA_NO_NODE)
Vladimir Davydov5dfb4172014-06-04 16:06:38 -07001683 page = alloc_pages(flags, order);
Christoph Lameter65c33762008-04-14 19:11:40 +03001684 else
Vlastimil Babka96db8002015-09-08 15:03:50 -07001685 page = __alloc_pages_node(node, flags, order);
Vladimir Davydov5dfb4172014-06-04 16:06:38 -07001686
Vladimir Davydov5dfb4172014-06-04 16:06:38 -07001687 return page;
Christoph Lameter65c33762008-04-14 19:11:40 +03001688}
1689
Thomas Garnier210e7a42016-07-26 15:21:59 -07001690#ifdef CONFIG_SLAB_FREELIST_RANDOM
1691/* Pre-initialize the random sequence cache */
1692static int init_cache_random_seq(struct kmem_cache *s)
1693{
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07001694 unsigned int count = oo_objects(s->oo);
Thomas Garnier210e7a42016-07-26 15:21:59 -07001695 int err;
Thomas Garnier210e7a42016-07-26 15:21:59 -07001696
Sean Reesa8100072017-02-08 14:30:59 -08001697 /* Bailout if already initialised */
1698 if (s->random_seq)
1699 return 0;
1700
Thomas Garnier210e7a42016-07-26 15:21:59 -07001701 err = cache_random_seq_create(s, count, GFP_KERNEL);
1702 if (err) {
1703 pr_err("SLUB: Unable to initialize free list for %s\n",
1704 s->name);
1705 return err;
1706 }
1707
1708 /* Transform to an offset on the set of pages */
1709 if (s->random_seq) {
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07001710 unsigned int i;
1711
Thomas Garnier210e7a42016-07-26 15:21:59 -07001712 for (i = 0; i < count; i++)
1713 s->random_seq[i] *= s->size;
1714 }
1715 return 0;
1716}
1717
1718/* Initialize each random sequence freelist per cache */
1719static void __init init_freelist_randomization(void)
1720{
1721 struct kmem_cache *s;
1722
1723 mutex_lock(&slab_mutex);
1724
1725 list_for_each_entry(s, &slab_caches, list)
1726 init_cache_random_seq(s);
1727
1728 mutex_unlock(&slab_mutex);
1729}
1730
1731/* Get the next entry on the pre-computed freelist randomized */
1732static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1733 unsigned long *pos, void *start,
1734 unsigned long page_limit,
1735 unsigned long freelist_count)
1736{
1737 unsigned int idx;
1738
1739 /*
1740 * If the target page allocation failed, the number of objects on the
1741 * page might be smaller than the usual size defined by the cache.
1742 */
1743 do {
1744 idx = s->random_seq[*pos];
1745 *pos += 1;
1746 if (*pos >= freelist_count)
1747 *pos = 0;
1748 } while (unlikely(idx >= page_limit));
1749
1750 return (char *)start + idx;
1751}
1752
1753/* Shuffle the single linked freelist based on a random pre-computed sequence */
1754static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1755{
1756 void *start;
1757 void *cur;
1758 void *next;
1759 unsigned long idx, pos, page_limit, freelist_count;
1760
1761 if (page->objects < 2 || !s->random_seq)
1762 return false;
1763
1764 freelist_count = oo_objects(s->oo);
1765 pos = get_random_int() % freelist_count;
1766
1767 page_limit = page->objects * s->size;
1768 start = fixup_red_left(s, page_address(page));
1769
1770 /* First entry is used as the base of the freelist */
1771 cur = next_freelist_entry(s, page, &pos, start, page_limit,
1772 freelist_count);
Andrey Konovalov4d176712018-12-28 00:30:23 -08001773 cur = setup_object(s, page, cur);
Thomas Garnier210e7a42016-07-26 15:21:59 -07001774 page->freelist = cur;
1775
1776 for (idx = 1; idx < page->objects; idx++) {
Thomas Garnier210e7a42016-07-26 15:21:59 -07001777 next = next_freelist_entry(s, page, &pos, start, page_limit,
1778 freelist_count);
Andrey Konovalov4d176712018-12-28 00:30:23 -08001779 next = setup_object(s, page, next);
Thomas Garnier210e7a42016-07-26 15:21:59 -07001780 set_freepointer(s, cur, next);
1781 cur = next;
1782 }
Thomas Garnier210e7a42016-07-26 15:21:59 -07001783 set_freepointer(s, cur, NULL);
1784
1785 return true;
1786}
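/*
 * Editor's note (illustrative sketch with made-up numbers): the random
 * sequence is precomputed per cache and each entry is pre-multiplied by
 * s->size, so it holds byte offsets.  For a cache with 4 objects of size 256,
 * a sequence {2, 0, 3, 1} becomes {512, 0, 768, 256} and shuffle_freelist()
 * chains the objects in that order, starting from a random position in the
 * sequence and wrapping around, so each new slab starts its freelist at a
 * different point in the same permutation.
 */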
1787#else
1788static inline int init_cache_random_seq(struct kmem_cache *s)
1789{
1790 return 0;
1791}
1792static inline void init_freelist_randomization(void) { }
1793static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1794{
1795 return false;
1796}
1797#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1798
Christoph Lameter81819f02007-05-06 14:49:36 -07001799static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1800{
Pekka Enberg06428782008-01-07 23:20:27 -08001801 struct page *page;
Christoph Lameter834f3d12008-04-14 19:11:31 +03001802 struct kmem_cache_order_objects oo = s->oo;
Pekka Enbergba522702009-06-24 21:59:51 +03001803 gfp_t alloc_gfp;
Andrey Konovalov4d176712018-12-28 00:30:23 -08001804 void *start, *p, *next;
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07001805 int idx;
Thomas Garnier210e7a42016-07-26 15:21:59 -07001806 bool shuffle;
Christoph Lameter81819f02007-05-06 14:49:36 -07001807
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001808 flags &= gfp_allowed_mask;
1809
Christoph Lameterb7a49f02008-02-14 14:21:32 -08001810 flags |= s->allocflags;
Mel Gormane12ba742007-10-16 01:25:52 -07001811
Pekka Enbergba522702009-06-24 21:59:51 +03001812 /*
1813 * Let the initial higher-order allocation fail under memory pressure
 1814	 * so we fall back to the minimum order allocation.
1815 */
1816 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
Mel Gormand0164ad2015-11-06 16:28:21 -08001817 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
Mel Gorman444eb2a42016-03-17 14:19:23 -07001818 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
Pekka Enbergba522702009-06-24 21:59:51 +03001819
Vladimir Davydov5dfb4172014-06-04 16:06:38 -07001820 page = alloc_slab_page(s, alloc_gfp, node, oo);
Christoph Lameter65c33762008-04-14 19:11:40 +03001821 if (unlikely(!page)) {
1822 oo = s->min;
Joonsoo Kim80c3a992014-03-12 17:26:20 +09001823 alloc_gfp = flags;
Christoph Lameter65c33762008-04-14 19:11:40 +03001824 /*
1825 * Allocation may have failed due to fragmentation.
 1826		 * Try a lower-order allocation if possible.
1827 */
Vladimir Davydov5dfb4172014-06-04 16:06:38 -07001828 page = alloc_slab_page(s, alloc_gfp, node, oo);
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001829 if (unlikely(!page))
1830 goto out;
1831 stat(s, ORDER_FALLBACK);
Christoph Lameter65c33762008-04-14 19:11:40 +03001832 }
Vegard Nossum5a896d92008-04-04 00:54:48 +02001833
Christoph Lameter834f3d12008-04-14 19:11:31 +03001834 page->objects = oo_objects(oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07001835
Roman Gushchin2e9bd482021-02-24 12:03:11 -08001836 account_slab_page(page, oo_order(oo), s, flags);
Roman Gushchin1f3147b2020-12-29 15:15:07 -08001837
Glauber Costa1b4f59e32012-10-22 18:05:36 +04001838 page->slab_cache = s;
Joonsoo Kimc03f94c2012-05-18 00:47:47 +09001839 __SetPageSlab(page);
Michal Hocko2f064f32015-08-21 14:11:51 -07001840 if (page_is_pfmemalloc(page))
Mel Gorman072bb0a2012-07-31 16:43:58 -07001841 SetPageSlabPfmemalloc(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001842
Andrey Konovalova7101222019-02-20 22:19:23 -08001843 kasan_poison_slab(page);
1844
Christoph Lameter81819f02007-05-06 14:49:36 -07001845 start = page_address(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001846
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07001847 setup_page_debug(s, page, start);
Andrey Ryabinin0316bec2015-02-13 14:39:42 -08001848
Thomas Garnier210e7a42016-07-26 15:21:59 -07001849 shuffle = shuffle_freelist(s, page);
1850
1851 if (!shuffle) {
Andrey Konovalov4d176712018-12-28 00:30:23 -08001852 start = fixup_red_left(s, start);
1853 start = setup_object(s, page, start);
1854 page->freelist = start;
Andrey Konovalov18e50662019-02-20 22:19:28 -08001855 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1856 next = p + s->size;
1857 next = setup_object(s, page, next);
1858 set_freepointer(s, p, next);
1859 p = next;
1860 }
1861 set_freepointer(s, p, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07001862 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001863
Christoph Lametere6e82ea2011-08-09 16:12:24 -05001864 page->inuse = page->objects;
Christoph Lameter8cb0a502011-06-01 12:25:46 -05001865 page->frozen = 1;
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001866
Christoph Lameter81819f02007-05-06 14:49:36 -07001867out:
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001868 if (!page)
1869 return NULL;
1870
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001871 inc_slabs_node(s, page_to_nid(page), page->objects);
1872
Christoph Lameter81819f02007-05-06 14:49:36 -07001873 return page;
1874}
1875
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001876static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1877{
Long Li44405092020-08-06 23:18:28 -07001878 if (unlikely(flags & GFP_SLAB_BUG_MASK))
1879 flags = kmalloc_fix_flags(flags);
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001880
Vlastimil Babka53a0de02021-05-11 13:01:34 +02001881 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
1882
Thomas Gleixner588f8ba2015-09-04 15:45:48 -07001883 return allocate_slab(s,
1884 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1885}
1886
Christoph Lameter81819f02007-05-06 14:49:36 -07001887static void __free_slab(struct kmem_cache *s, struct page *page)
1888{
Christoph Lameter834f3d12008-04-14 19:11:31 +03001889 int order = compound_order(page);
1890 int pages = 1 << order;
Christoph Lameter81819f02007-05-06 14:49:36 -07001891
Vlastimil Babka8fc8d662020-08-06 23:18:58 -07001892 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001893 void *p;
1894
1895 slab_pad_check(s, page);
Christoph Lameter224a88b2008-04-14 19:11:31 +03001896 for_each_object(p, s, page_address(page),
1897 page->objects)
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001898 check_object(s, page, p, SLUB_RED_INACTIVE);
Christoph Lameter81819f02007-05-06 14:49:36 -07001899 }
1900
Mel Gorman072bb0a2012-07-31 16:43:58 -07001901 __ClearPageSlabPfmemalloc(page);
Christoph Lameter49bd5222008-04-14 18:52:18 +03001902 __ClearPageSlab(page);
Vlastimil Babka0c06dd72020-12-14 19:04:29 -08001903 /* In union with page->mapping where page allocator expects NULL */
1904 page->slab_cache = NULL;
Nick Piggin1eb5ac62009-05-05 19:13:44 +10001905 if (current->reclaim_state)
1906 current->reclaim_state->reclaimed_slab += pages;
Roman Gushchin74d555b2020-08-06 23:21:44 -07001907 unaccount_slab_page(page, order, s);
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07001908 __free_pages(page, order);
Christoph Lameter81819f02007-05-06 14:49:36 -07001909}
1910
1911static void rcu_free_slab(struct rcu_head *h)
1912{
Matthew Wilcoxbf68c212018-06-07 17:09:05 -07001913 struct page *page = container_of(h, struct page, rcu_head);
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001914
Glauber Costa1b4f59e32012-10-22 18:05:36 +04001915 __free_slab(page->slab_cache, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001916}
1917
1918static void free_slab(struct kmem_cache *s, struct page *page)
1919{
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08001920 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
Matthew Wilcoxbf68c212018-06-07 17:09:05 -07001921 call_rcu(&page->rcu_head, rcu_free_slab);
Christoph Lameter81819f02007-05-06 14:49:36 -07001922 } else
1923 __free_slab(s, page);
1924}
1925
1926static void discard_slab(struct kmem_cache *s, struct page *page)
1927{
Christoph Lameter205ab992008-04-14 19:11:40 +03001928 dec_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001929 free_slab(s, page);
1930}
1931
1932/*
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001933 * Management of partially allocated slabs.
Christoph Lameter81819f02007-05-06 14:49:36 -07001934 */
Steven Rostedt1e4dd942014-02-10 14:25:46 -08001935static inline void
1936__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
Christoph Lameter81819f02007-05-06 14:49:36 -07001937{
Christoph Lametere95eed52007-05-06 14:49:44 -07001938 n->nr_partial++;
Shaohua Li136333d2011-08-24 08:57:52 +08001939 if (tail == DEACTIVATE_TO_TAIL)
Tobin C. Harding916ac052019-05-13 17:16:12 -07001940 list_add_tail(&page->slab_list, &n->partial);
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001941 else
Tobin C. Harding916ac052019-05-13 17:16:12 -07001942 list_add(&page->slab_list, &n->partial);
Christoph Lameter81819f02007-05-06 14:49:36 -07001943}
1944
Steven Rostedt1e4dd942014-02-10 14:25:46 -08001945static inline void add_partial(struct kmem_cache_node *n,
1946 struct page *page, int tail)
1947{
1948 lockdep_assert_held(&n->list_lock);
1949 __add_partial(n, page, tail);
1950}
1951
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001952static inline void remove_partial(struct kmem_cache_node *n,
Christoph Lameter62e346a2010-09-28 08:10:28 -05001953 struct page *page)
1954{
Peter Zijlstrac65c1872014-01-10 13:23:49 +01001955 lockdep_assert_held(&n->list_lock);
Tobin C. Harding916ac052019-05-13 17:16:12 -07001956 list_del(&page->slab_list);
Dmitry Safonov52b4b952016-02-17 13:11:37 -08001957 n->nr_partial--;
Christoph Lameter62e346a2010-09-28 08:10:28 -05001958}
1959
Christoph Lameter81819f02007-05-06 14:49:36 -07001960/*
Christoph Lameter7ced3712012-05-09 10:09:53 -05001961 * Remove slab from the partial list, freeze it and
1962 * return the pointer to the freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07001963 *
Christoph Lameter497b66f2011-08-09 16:12:26 -05001964 * Returns a list of objects or NULL if it fails.
Christoph Lameter81819f02007-05-06 14:49:36 -07001965 */
Christoph Lameter497b66f2011-08-09 16:12:26 -05001966static inline void *acquire_slab(struct kmem_cache *s,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001967 struct kmem_cache_node *n, struct page *page,
Joonsoo Kim633b0762013-01-21 17:01:25 +09001968 int mode, int *objects)
Christoph Lameter81819f02007-05-06 14:49:36 -07001969{
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001970 void *freelist;
1971 unsigned long counters;
1972 struct page new;
1973
Peter Zijlstrac65c1872014-01-10 13:23:49 +01001974 lockdep_assert_held(&n->list_lock);
1975
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001976 /*
1977 * Zap the freelist and set the frozen bit.
1978 * The old freelist is the list of objects for the
1979 * per cpu allocation list.
1980 */
Christoph Lameter7ced3712012-05-09 10:09:53 -05001981 freelist = page->freelist;
1982 counters = page->counters;
1983 new.counters = counters;
Joonsoo Kim633b0762013-01-21 17:01:25 +09001984 *objects = new.objects - new.inuse;
Pekka Enberg23910c52012-06-04 10:14:58 +03001985 if (mode) {
Christoph Lameter7ced3712012-05-09 10:09:53 -05001986 new.inuse = page->objects;
Pekka Enberg23910c52012-06-04 10:14:58 +03001987 new.freelist = NULL;
1988 } else {
1989 new.freelist = freelist;
1990 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001991
Dave Hansena0132ac2014-01-29 14:05:50 -08001992 VM_BUG_ON(new.frozen);
Christoph Lameter7ced3712012-05-09 10:09:53 -05001993 new.frozen = 1;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001994
Christoph Lameter7ced3712012-05-09 10:09:53 -05001995 if (!__cmpxchg_double_slab(s, page,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001996 freelist, counters,
Joonsoo Kim02d76332012-05-17 00:13:02 +09001997 new.freelist, new.counters,
Christoph Lameter7ced3712012-05-09 10:09:53 -05001998 "acquire_slab"))
Christoph Lameter7ced3712012-05-09 10:09:53 -05001999 return NULL;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002000
2001 remove_partial(n, page);
Christoph Lameter7ced3712012-05-09 10:09:53 -05002002 WARN_ON(!freelist);
Christoph Lameter49e22582011-08-09 16:12:27 -05002003 return freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07002004}
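/*
 * Editor's note (illustrative): page->freelist and page->counters (which
 * packs inuse, objects and frozen) are updated as one pair by
 * __cmpxchg_double_slab().  Taking a partial slab with, say, 10 of 16
 * objects free in mode == 1 publishes freelist == NULL, inuse == 16 and
 * frozen == 1 in a single step, so a concurrent free sees either the old
 * unfrozen slab or the fully frozen one, never a half-updated state; if the
 * slab changed in between, the cmpxchg fails and acquire_slab() returns NULL.
 */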
2005
Joonsoo Kim633b0762013-01-21 17:01:25 +09002006static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
Joonsoo Kim8ba00bb2012-09-17 14:09:09 -07002007static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
Christoph Lameter49e22582011-08-09 16:12:27 -05002008
Christoph Lameter81819f02007-05-06 14:49:36 -07002009/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002010 * Try to allocate a partial slab from a specific node.
Christoph Lameter81819f02007-05-06 14:49:36 -07002011 */
Joonsoo Kim8ba00bb2012-09-17 14:09:09 -07002012static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
Vlastimil Babka4b1f4492021-05-11 17:45:26 +02002013 struct page **ret_page, gfp_t gfpflags)
Christoph Lameter81819f02007-05-06 14:49:36 -07002014{
Christoph Lameter49e22582011-08-09 16:12:27 -05002015 struct page *page, *page2;
2016 void *object = NULL;
Alexey Dobriyane5d99982018-04-05 16:21:10 -07002017 unsigned int available = 0;
Vlastimil Babka4b1f4492021-05-11 17:45:26 +02002018 unsigned long flags;
Joonsoo Kim633b0762013-01-21 17:01:25 +09002019 int objects;
Christoph Lameter81819f02007-05-06 14:49:36 -07002020
2021 /*
2022 * Racy check. If we mistakenly see no partial slabs then we
2023 * just allocate an empty slab. If we mistakenly try to get a
Chen Tao70b6d252020-10-15 20:10:01 -07002024 * partial slab and there is none available then get_partial()
Christoph Lameter672bba32007-05-09 02:32:39 -07002025 * will return NULL.
Christoph Lameter81819f02007-05-06 14:49:36 -07002026 */
2027 if (!n || !n->nr_partial)
2028 return NULL;
2029
Vlastimil Babka4b1f4492021-05-11 17:45:26 +02002030 spin_lock_irqsave(&n->list_lock, flags);
Tobin C. Harding916ac052019-05-13 17:16:12 -07002031 list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
Joonsoo Kim8ba00bb2012-09-17 14:09:09 -07002032 void *t;
Christoph Lameter49e22582011-08-09 16:12:27 -05002033
Vlastimil Babka4b1f4492021-05-11 17:45:26 +02002034 if (!pfmemalloc_match(page, gfpflags))
Joonsoo Kim8ba00bb2012-09-17 14:09:09 -07002035 continue;
2036
Joonsoo Kim633b0762013-01-21 17:01:25 +09002037 t = acquire_slab(s, n, page, object == NULL, &objects);
Christoph Lameter49e22582011-08-09 16:12:27 -05002038 if (!t)
Linus Torvalds9b1ea292021-03-10 10:18:04 -08002039 break;
Christoph Lameter49e22582011-08-09 16:12:27 -05002040
Joonsoo Kim633b0762013-01-21 17:01:25 +09002041 available += objects;
Alex,Shi12d79632011-09-07 10:26:36 +08002042 if (!object) {
Vlastimil Babka75c8ff22021-05-11 14:05:22 +02002043 *ret_page = page;
Christoph Lameter49e22582011-08-09 16:12:27 -05002044 stat(s, ALLOC_FROM_PARTIAL);
Christoph Lameter49e22582011-08-09 16:12:27 -05002045 object = t;
Christoph Lameter49e22582011-08-09 16:12:27 -05002046 } else {
Joonsoo Kim633b0762013-01-21 17:01:25 +09002047 put_cpu_partial(s, page, 0);
Alex Shi8028dce2012-02-03 23:34:56 +08002048 stat(s, CPU_PARTIAL_NODE);
Christoph Lameter49e22582011-08-09 16:12:27 -05002049 }
Joonsoo Kim345c9052013-06-19 14:05:52 +09002050 if (!kmem_cache_has_cpu_partial(s)
Wei Yange6d0e1d2017-07-06 15:36:34 -07002051 || available > slub_cpu_partial(s) / 2)
Christoph Lameter49e22582011-08-09 16:12:27 -05002052 break;
2053
Christoph Lameter497b66f2011-08-09 16:12:26 -05002054 }
Vlastimil Babka4b1f4492021-05-11 17:45:26 +02002055 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter497b66f2011-08-09 16:12:26 -05002056 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07002057}
2058
2059/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002060 * Get a page from somewhere. Search in increasing NUMA distances.
Christoph Lameter81819f02007-05-06 14:49:36 -07002061 */
Joonsoo Kimde3ec032012-01-27 00:12:23 -08002062static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
Vlastimil Babka75c8ff22021-05-11 14:05:22 +02002063 struct page **ret_page)
Christoph Lameter81819f02007-05-06 14:49:36 -07002064{
2065#ifdef CONFIG_NUMA
2066 struct zonelist *zonelist;
Mel Gormandd1a2392008-04-28 02:12:17 -07002067 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07002068 struct zone *zone;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002069 enum zone_type highest_zoneidx = gfp_zone(flags);
Christoph Lameter497b66f2011-08-09 16:12:26 -05002070 void *object;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002071 unsigned int cpuset_mems_cookie;
Christoph Lameter81819f02007-05-06 14:49:36 -07002072
2073 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002074 * The defrag ratio allows a configuration of the tradeoffs between
2075 * inter node defragmentation and node local allocations. A lower
2076 * defrag_ratio increases the tendency to do local allocations
2077 * instead of attempting to obtain partial slabs from other nodes.
Christoph Lameter81819f02007-05-06 14:49:36 -07002078 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002079 * If the defrag_ratio is set to 0 then kmalloc() always
2080 * returns node local objects. If the ratio is higher then kmalloc()
2081 * may return off node objects because partial slabs are obtained
2082 * from other nodes and filled up.
Christoph Lameter81819f02007-05-06 14:49:36 -07002083 *
Li Peng43efd3e2016-05-19 17:10:43 -07002084 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2085 * (which makes defrag_ratio = 1000) then every (well almost)
2086 * allocation will first attempt to defrag slab caches on other nodes.
2087 * This means scanning over all nodes to look for partial slabs which
2088 * may be expensive if we do it every time we are trying to find a slab
Christoph Lameter672bba32007-05-09 02:32:39 -07002089 * with available objects.
Christoph Lameter81819f02007-05-06 14:49:36 -07002090 */
Christoph Lameter98246012008-01-07 23:20:26 -08002091 if (!s->remote_node_defrag_ratio ||
2092 get_cycles() % 1024 > s->remote_node_defrag_ratio)
Christoph Lameter81819f02007-05-06 14:49:36 -07002093 return NULL;
2094
Mel Gormancc9a6c82012-03-21 16:34:11 -07002095 do {
Mel Gormand26914d2014-04-03 14:47:24 -07002096 cpuset_mems_cookie = read_mems_allowed_begin();
David Rientjes2a389612014-04-07 15:37:29 -07002097 zonelist = node_zonelist(mempolicy_slab_node(), flags);
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002098 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
Mel Gormancc9a6c82012-03-21 16:34:11 -07002099 struct kmem_cache_node *n;
Christoph Lameter81819f02007-05-06 14:49:36 -07002100
Mel Gormancc9a6c82012-03-21 16:34:11 -07002101 n = get_node(s, zone_to_nid(zone));
Christoph Lameter81819f02007-05-06 14:49:36 -07002102
Vladimir Davydovdee2f8a2014-12-12 16:58:28 -08002103 if (n && cpuset_zone_allowed(zone, flags) &&
Mel Gormancc9a6c82012-03-21 16:34:11 -07002104 n->nr_partial > s->min_partial) {
Vlastimil Babka75c8ff22021-05-11 14:05:22 +02002105 object = get_partial_node(s, n, ret_page, flags);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002106 if (object) {
2107 /*
Mel Gormand26914d2014-04-03 14:47:24 -07002108 * Don't check read_mems_allowed_retry()
2109 * here - if mems_allowed was updated in
2110 * parallel, that was a harmless race
2111 * between allocation and the cpuset
2112 * update
Mel Gormancc9a6c82012-03-21 16:34:11 -07002113 */
Mel Gormancc9a6c82012-03-21 16:34:11 -07002114 return object;
2115 }
Miao Xiec0ff7452010-05-24 14:32:08 -07002116 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002117 }
Mel Gormand26914d2014-04-03 14:47:24 -07002118 } while (read_mems_allowed_retry(cpuset_mems_cookie));
Tobin C. Harding6dfd1b62019-05-13 17:16:09 -07002119#endif /* CONFIG_NUMA */
Christoph Lameter81819f02007-05-06 14:49:36 -07002120 return NULL;
2121}
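/*
 * Editor's note (arithmetic sketch based on the check above): the stored
 * ratio is ten times the sysfs value and the early return triggers when
 * get_cycles() % 1024 exceeds it.  Writing 100 to remote_node_defrag_ratio
 * stores 1000, so only about 2% of calls bail out and nearly every
 * allocation scans remote nodes; writing 10 stores 100 and roughly 90% of
 * calls return NULL immediately, keeping allocations node-local.
 */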
2122
2123/*
2124 * Get a partial page, lock it and return it.
2125 */
Christoph Lameter497b66f2011-08-09 16:12:26 -05002126static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
Vlastimil Babka75c8ff22021-05-11 14:05:22 +02002127 struct page **ret_page)
Christoph Lameter81819f02007-05-06 14:49:36 -07002128{
Christoph Lameter497b66f2011-08-09 16:12:26 -05002129 void *object;
Joonsoo Kima561ce02014-10-09 15:26:15 -07002130 int searchnode = node;
2131
2132 if (node == NUMA_NO_NODE)
2133 searchnode = numa_mem_id();
Christoph Lameter81819f02007-05-06 14:49:36 -07002134
Vlastimil Babka75c8ff22021-05-11 14:05:22 +02002135 object = get_partial_node(s, get_node(s, searchnode), ret_page, flags);
Christoph Lameter497b66f2011-08-09 16:12:26 -05002136 if (object || node != NUMA_NO_NODE)
2137 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07002138
Vlastimil Babka75c8ff22021-05-11 14:05:22 +02002139 return get_any_partial(s, flags, ret_page);
Christoph Lameter81819f02007-05-06 14:49:36 -07002140}
2141
Thomas Gleixner923717c2019-10-15 21:18:12 +02002142#ifdef CONFIG_PREEMPTION
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002143/*
Ethon Paul0d645ed2020-06-04 16:49:34 -07002144 * Calculate the next globally unique transaction for disambiguation
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002145 * during cmpxchg. The transactions start with the cpu number and are then
2146 * incremented by CONFIG_NR_CPUS.
2147 */
2148#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
2149#else
2150/*
 2151 * No preemption supported, therefore there is also no need to check for
2152 * different cpus.
2153 */
2154#define TID_STEP 1
2155#endif
2156
2157static inline unsigned long next_tid(unsigned long tid)
2158{
2159 return tid + TID_STEP;
2160}
2161
Qian Cai9d5f0be2019-09-23 15:33:52 -07002162#ifdef SLUB_DEBUG_CMPXCHG
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002163static inline unsigned int tid_to_cpu(unsigned long tid)
2164{
2165 return tid % TID_STEP;
2166}
2167
2168static inline unsigned long tid_to_event(unsigned long tid)
2169{
2170 return tid / TID_STEP;
2171}
Qian Cai9d5f0be2019-09-23 15:33:52 -07002172#endif
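/*
 * Editor's note (worked example, assuming CONFIG_PREEMPTION and
 * CONFIG_NR_CPUS == 4): TID_STEP is 4 and a cpu's tid starts at its cpu
 * number (see init_tid() below), so cpu 2 cycles through 2, 6, 10, ...;
 * then tid_to_cpu(10) == 2 and tid_to_event(10) == 2.  A mismatch therefore
 * lets note_cmpxchg_failure() report whether the fast path lost a race to a
 * cpu change or to another operation on the same cpu.
 */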
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002173
2174static inline unsigned int init_tid(int cpu)
2175{
2176 return cpu;
2177}
2178
2179static inline void note_cmpxchg_failure(const char *n,
2180 const struct kmem_cache *s, unsigned long tid)
2181{
2182#ifdef SLUB_DEBUG_CMPXCHG
2183 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2184
Fabian Frederickf9f58282014-06-04 16:06:34 -07002185 pr_info("%s %s: cmpxchg redo ", n, s->name);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002186
Thomas Gleixner923717c2019-10-15 21:18:12 +02002187#ifdef CONFIG_PREEMPTION
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002188 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
Fabian Frederickf9f58282014-06-04 16:06:34 -07002189 pr_warn("due to cpu change %d -> %d\n",
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002190 tid_to_cpu(tid), tid_to_cpu(actual_tid));
2191 else
2192#endif
2193 if (tid_to_event(tid) != tid_to_event(actual_tid))
Fabian Frederickf9f58282014-06-04 16:06:34 -07002194 pr_warn("due to cpu running other code. Event %ld->%ld\n",
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002195 tid_to_event(tid), tid_to_event(actual_tid));
2196 else
Fabian Frederickf9f58282014-06-04 16:06:34 -07002197 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002198 actual_tid, tid, next_tid(tid));
2199#endif
Christoph Lameter4fdccdf2011-03-22 13:35:00 -05002200 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002201}
2202
Fengguang Wu788e1aa2012-09-28 16:34:05 +08002203static void init_kmem_cache_cpus(struct kmem_cache *s)
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002204{
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002205 int cpu;
2206
2207 for_each_possible_cpu(cpu)
2208 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002209}
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002210
2211/*
Vlastimil Babkaa019d202021-05-12 13:53:34 +02002212 * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
 2213 * unfreezes the slab and puts it on the proper list.
 2214 * Assumes the slab has already been safely taken away from kmem_cache_cpu
2215 * by the caller.
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002216 */
Chen Gangd0e0ac92013-07-15 09:05:29 +08002217static void deactivate_slab(struct kmem_cache *s, struct page *page,
Vlastimil Babkaa019d202021-05-12 13:53:34 +02002218 void *freelist)
Christoph Lameter81819f02007-05-06 14:49:36 -07002219{
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002220 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002221 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002222 int lock = 0, free_delta = 0;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002223 enum slab_modes l = M_NONE, m = M_NONE;
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002224 void *nextfree, *freelist_iter, *freelist_tail;
Shaohua Li136333d2011-08-24 08:57:52 +08002225 int tail = DEACTIVATE_TO_HEAD;
Vlastimil Babka3406e912021-05-12 13:59:58 +02002226 unsigned long flags = 0;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002227 struct page new;
2228 struct page old;
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08002229
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002230 if (page->freelist) {
Christoph Lameter84e554e62009-12-18 16:26:23 -06002231 stat(s, DEACTIVATE_REMOTE_FREES);
Shaohua Li136333d2011-08-24 08:57:52 +08002232 tail = DEACTIVATE_TO_TAIL;
Christoph Lameter894b8782007-05-10 03:15:16 -07002233 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002234
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002235 /*
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002236 * Stage one: Count the objects on cpu's freelist as free_delta and
2237 * remember the last object in freelist_tail for later splicing.
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002238 */
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002239 freelist_tail = NULL;
2240 freelist_iter = freelist;
2241 while (freelist_iter) {
2242 nextfree = get_freepointer(s, freelist_iter);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002243
Dongli Zhang52f23472020-06-01 21:45:47 -07002244 /*
2245 * If 'nextfree' is invalid, it is possible that the object at
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002246 * 'freelist_iter' is already corrupted. So isolate all objects
2247 * starting at 'freelist_iter' by skipping them.
Dongli Zhang52f23472020-06-01 21:45:47 -07002248 */
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002249 if (freelist_corrupted(s, page, &freelist_iter, nextfree))
Dongli Zhang52f23472020-06-01 21:45:47 -07002250 break;
2251
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002252 freelist_tail = freelist_iter;
2253 free_delta++;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002254
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002255 freelist_iter = nextfree;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002256 }
2257
2258 /*
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002259 * Stage two: Unfreeze the page while splicing the per-cpu
2260 * freelist to the head of page's freelist.
2261 *
2262 * Ensure that the page is unfrozen while the list presence
2263 * reflects the actual number of objects during unfreeze.
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002264 *
2265 * We set up the list membership and then perform a cmpxchg
2266 * with the count. If there is a mismatch then the page
2267 * is not unfrozen but the page is on the wrong list.
2268 *
2269 * Then we restart the process which may have to remove
2270 * the page from the list that we just put it on again
2271 * because the number of objects in the slab may have
2272 * changed.
2273 */
2274redo:
2275
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002276 old.freelist = READ_ONCE(page->freelist);
2277 old.counters = READ_ONCE(page->counters);
Dave Hansena0132ac2014-01-29 14:05:50 -08002278 VM_BUG_ON(!old.frozen);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002279
2280 /* Determine target state of the slab */
2281 new.counters = old.counters;
Vlastimil Babkad930ff02021-02-24 12:01:19 -08002282 if (freelist_tail) {
2283 new.inuse -= free_delta;
2284 set_freepointer(s, freelist_tail, old.freelist);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002285 new.freelist = freelist;
2286 } else
2287 new.freelist = old.freelist;
2288
2289 new.frozen = 0;
2290
Joonsoo Kim8a5b20a2014-07-02 15:22:35 -07002291 if (!new.inuse && n->nr_partial >= s->min_partial)
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002292 m = M_FREE;
2293 else if (new.freelist) {
2294 m = M_PARTIAL;
2295 if (!lock) {
2296 lock = 1;
2297 /*
Wei Yang8bb4e7a2019-03-05 15:46:22 -08002298 * Taking the spinlock removes the possibility
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002299 * that acquire_slab() will see a slab page that
2300 * is frozen
2301 */
Vlastimil Babka3406e912021-05-12 13:59:58 +02002302 spin_lock_irqsave(&n->list_lock, flags);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002303 }
2304 } else {
2305 m = M_FULL;
Vlastimil Babka965c4842020-12-14 19:04:36 -08002306 if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) {
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002307 lock = 1;
2308 /*
2309 * This also ensures that the scanning of full
2310 * slabs from diagnostic functions will not see
2311 * any frozen slabs.
2312 */
Vlastimil Babka3406e912021-05-12 13:59:58 +02002313 spin_lock_irqsave(&n->list_lock, flags);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002314 }
2315 }
2316
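	/* Adjust the node's partial/full lists only when the slab's list state changes. */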
2317 if (l != m) {
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002318 if (l == M_PARTIAL)
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002319 remove_partial(n, page);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002320 else if (l == M_FULL)
Peter Zijlstrac65c1872014-01-10 13:23:49 +01002321 remove_full(s, n, page);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002322
Wei Yang88349a22018-12-28 00:33:13 -08002323 if (m == M_PARTIAL)
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002324 add_partial(n, page, tail);
Wei Yang88349a22018-12-28 00:33:13 -08002325 else if (m == M_FULL)
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002326 add_full(s, n, page);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002327 }
2328
2329 l = m;
Vlastimil Babka3406e912021-05-12 13:59:58 +02002330 if (!cmpxchg_double_slab(s, page,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002331 old.freelist, old.counters,
2332 new.freelist, new.counters,
2333 "unfreezing slab"))
2334 goto redo;
2335
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002336 if (lock)
Vlastimil Babka3406e912021-05-12 13:59:58 +02002337 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002338
Wei Yang88349a22018-12-28 00:33:13 -08002339 if (m == M_PARTIAL)
2340 stat(s, tail);
2341 else if (m == M_FULL)
2342 stat(s, DEACTIVATE_FULL);
2343 else if (m == M_FREE) {
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002344 stat(s, DEACTIVATE_EMPTY);
2345 discard_slab(s, page);
2346 stat(s, FREE_SLAB);
2347 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002348}
2349
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002350/*
2351 * Unfreeze all the cpu partial slabs.
2352 *
Vlastimil Babkaf3ab8b62021-05-20 14:00:03 +02002353 * This function must be called with preemption or migration
2354 * disabled, with c local to the cpu.
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002355 */
Christoph Lameter59a09912012-11-28 16:23:00 +00002356static void unfreeze_partials(struct kmem_cache *s,
2357 struct kmem_cache_cpu *c)
Christoph Lameter49e22582011-08-09 16:12:27 -05002358{
Joonsoo Kim345c9052013-06-19 14:05:52 +09002359#ifdef CONFIG_SLUB_CPU_PARTIAL
Joonsoo Kim43d77862012-06-09 02:23:16 +09002360 struct kmem_cache_node *n = NULL, *n2 = NULL;
Shaohua Li9ada1932011-11-14 13:34:13 +08002361 struct page *page, *discard_page = NULL;
Vlastimil Babkaf3ab8b62021-05-20 14:00:03 +02002362 unsigned long flags;
2363
2364 local_irq_save(flags);
Christoph Lameter49e22582011-08-09 16:12:27 -05002365
chenqiwu4c7ba222020-04-01 21:04:16 -07002366 while ((page = slub_percpu_partial(c))) {
Christoph Lameter49e22582011-08-09 16:12:27 -05002367 struct page new;
2368 struct page old;
2369
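		/* Pop this page off the per-cpu partial list (c->partial = page->next). */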
chenqiwu4c7ba222020-04-01 21:04:16 -07002370 slub_set_percpu_partial(c, page);
Joonsoo Kim43d77862012-06-09 02:23:16 +09002371
2372 n2 = get_node(s, page_to_nid(page));
2373 if (n != n2) {
2374 if (n)
2375 spin_unlock(&n->list_lock);
2376
2377 n = n2;
2378 spin_lock(&n->list_lock);
2379 }
Christoph Lameter49e22582011-08-09 16:12:27 -05002380
2381 do {
2382
2383 old.freelist = page->freelist;
2384 old.counters = page->counters;
Dave Hansena0132ac2014-01-29 14:05:50 -08002385 VM_BUG_ON(!old.frozen);
Christoph Lameter49e22582011-08-09 16:12:27 -05002386
2387 new.counters = old.counters;
2388 new.freelist = old.freelist;
2389
2390 new.frozen = 0;
2391
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002392 } while (!__cmpxchg_double_slab(s, page,
Christoph Lameter49e22582011-08-09 16:12:27 -05002393 old.freelist, old.counters,
2394 new.freelist, new.counters,
2395 "unfreezing slab"));
2396
Joonsoo Kim8a5b20a2014-07-02 15:22:35 -07002397 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
Shaohua Li9ada1932011-11-14 13:34:13 +08002398 page->next = discard_page;
2399 discard_page = page;
Joonsoo Kim43d77862012-06-09 02:23:16 +09002400 } else {
2401 add_partial(n, page, DEACTIVATE_TO_TAIL);
2402 stat(s, FREE_ADD_PARTIAL);
Christoph Lameter49e22582011-08-09 16:12:27 -05002403 }
2404 }
2405
2406 if (n)
2407 spin_unlock(&n->list_lock);
Shaohua Li9ada1932011-11-14 13:34:13 +08002408
Vlastimil Babka8de06a62021-05-20 14:01:57 +02002409 local_irq_restore(flags);
2410
Shaohua Li9ada1932011-11-14 13:34:13 +08002411 while (discard_page) {
2412 page = discard_page;
2413 discard_page = discard_page->next;
2414
2415 stat(s, DEACTIVATE_EMPTY);
2416 discard_slab(s, page);
2417 stat(s, FREE_SLAB);
2418 }
Vlastimil Babkaf3ab8b62021-05-20 14:00:03 +02002419
Tobin C. Harding6dfd1b62019-05-13 17:16:09 -07002420#endif /* CONFIG_SLUB_CPU_PARTIAL */
Christoph Lameter49e22582011-08-09 16:12:27 -05002421}
2422
2423/*
Wei Yang9234bae2019-03-05 15:43:10 -08002424 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2425 * partial page slot if available.
Christoph Lameter49e22582011-08-09 16:12:27 -05002426 *
2427 * If we did not find a slot then simply move all the partials to the
2428 * per node partial list.
2429 */
Joonsoo Kim633b0762013-01-21 17:01:25 +09002430static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
Christoph Lameter49e22582011-08-09 16:12:27 -05002431{
Joonsoo Kim345c9052013-06-19 14:05:52 +09002432#ifdef CONFIG_SLUB_CPU_PARTIAL
Christoph Lameter49e22582011-08-09 16:12:27 -05002433 struct page *oldpage;
2434 int pages;
2435 int pobjects;
2436
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -08002437 preempt_disable();
Christoph Lameter49e22582011-08-09 16:12:27 -05002438 do {
2439 pages = 0;
2440 pobjects = 0;
2441 oldpage = this_cpu_read(s->cpu_slab->partial);
2442
2443 if (oldpage) {
2444 pobjects = oldpage->pobjects;
2445 pages = oldpage->pages;
chenqiwubbd4e302020-04-01 21:04:19 -07002446 if (drain && pobjects > slub_cpu_partial(s)) {
Christoph Lameter49e22582011-08-09 16:12:27 -05002447 /*
2448 * partial array is full. Move the existing
2449 * set to the per node partial list.
2450 */
Christoph Lameter59a09912012-11-28 16:23:00 +00002451 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
Joonsoo Kime24fc412012-06-23 03:22:38 +09002452 oldpage = NULL;
Christoph Lameter49e22582011-08-09 16:12:27 -05002453 pobjects = 0;
2454 pages = 0;
Alex Shi8028dce2012-02-03 23:34:56 +08002455 stat(s, CPU_PARTIAL_DRAIN);
Christoph Lameter49e22582011-08-09 16:12:27 -05002456 }
2457 }
2458
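		/*
		 * Account the newly frozen page and link it at the head of the
		 * per-cpu partial list; the running pages/pobjects totals are
		 * cached in the head page for cheap inspection.
		 */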
2459 pages++;
2460 pobjects += page->objects - page->inuse;
2461
2462 page->pages = pages;
2463 page->pobjects = pobjects;
2464 page->next = oldpage;
2465
Chen Gangd0e0ac92013-07-15 09:05:29 +08002466 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2467 != oldpage);
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -08002468 preempt_enable();
Tobin C. Harding6dfd1b62019-05-13 17:16:09 -07002469#endif /* CONFIG_SLUB_CPU_PARTIAL */
Christoph Lameter49e22582011-08-09 16:12:27 -05002470}
2471
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002472static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07002473{
Vlastimil Babkaa019d202021-05-12 13:53:34 +02002474 void *freelist = c->freelist;
2475 struct page *page = c->page;
Christoph Lameterc17dda42012-05-09 10:09:57 -05002476
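	/*
	 * Detach the slab from the per-cpu slot and bump the tid so that
	 * concurrent fastpath cmpxchg users notice, then deactivate it.
	 */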
Vlastimil Babkaa019d202021-05-12 13:53:34 +02002477 c->page = NULL;
2478 c->freelist = NULL;
Christoph Lameterc17dda42012-05-09 10:09:57 -05002479 c->tid = next_tid(c->tid);
Vlastimil Babkaa019d202021-05-12 13:53:34 +02002480
2481 deactivate_slab(s, page, freelist);
2482
2483 stat(s, CPUSLAB_FLUSH);
Christoph Lameter81819f02007-05-06 14:49:36 -07002484}
2485
2486/*
2487 * Flush cpu slab.
Christoph Lameter6446faa2008-02-15 23:45:26 -08002488 *
Christoph Lameter81819f02007-05-06 14:49:36 -07002489 * Called from IPI handler with interrupts disabled.
2490 */
Christoph Lameter0c710012007-07-17 04:03:24 -07002491static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
Christoph Lameter81819f02007-05-06 14:49:36 -07002492{
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002493 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
Christoph Lameter81819f02007-05-06 14:49:36 -07002494
Wei Yang1265ef22018-12-28 00:33:06 -08002495 if (c->page)
2496 flush_slab(s, c);
Christoph Lameter49e22582011-08-09 16:12:27 -05002497
Wei Yang1265ef22018-12-28 00:33:06 -08002498 unfreeze_partials(s, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07002499}
2500
2501static void flush_cpu_slab(void *d)
2502{
2503 struct kmem_cache *s = d;
Christoph Lameter81819f02007-05-06 14:49:36 -07002504
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002505 __flush_cpu_slab(s, smp_processor_id());
Christoph Lameter81819f02007-05-06 14:49:36 -07002506}
2507
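/* Tell on_each_cpu_cond() whether this cpu has anything worth flushing. */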
Gilad Ben-Yossefa8364d52012-03-28 14:42:44 -07002508static bool has_cpu_slab(int cpu, void *info)
2509{
2510 struct kmem_cache *s = info;
2511 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2512
Wei Yanga93cf072017-07-06 15:36:31 -07002513 return c->page || slub_percpu_partial(c);
Gilad Ben-Yossefa8364d52012-03-28 14:42:44 -07002514}
2515
Christoph Lameter81819f02007-05-06 14:49:36 -07002516static void flush_all(struct kmem_cache *s)
2517{
Sebastian Andrzej Siewiorcb923152020-01-17 10:01:37 +01002518 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07002519}
2520
2521/*
Sebastian Andrzej Siewiora96a87b2016-08-18 14:57:19 +02002522 * Use the cpu hotplug callback to ensure that the cpu slabs are flushed when
2523 * necessary.
2524 */
2525static int slub_cpu_dead(unsigned int cpu)
2526{
2527 struct kmem_cache *s;
2528 unsigned long flags;
2529
2530 mutex_lock(&slab_mutex);
2531 list_for_each_entry(s, &slab_caches, list) {
2532 local_irq_save(flags);
2533 __flush_cpu_slab(s, cpu);
2534 local_irq_restore(flags);
2535 }
2536 mutex_unlock(&slab_mutex);
2537 return 0;
2538}
2539
2540/*
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002541 * Check whether the page of a per cpu structure satisfies the numa
2542 * locality expectations of the request.
2543 */
Christoph Lameter57d437d2012-05-09 10:09:59 -05002544static inline int node_match(struct page *page, int node)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002545{
2546#ifdef CONFIG_NUMA
Wei Yang6159d0f2018-12-28 00:33:09 -08002547 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002548 return 0;
2549#endif
2550 return 1;
2551}
2552
David Rientjes9a02d692014-06-04 16:06:36 -07002553#ifdef CONFIG_SLUB_DEBUG
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002554static int count_free(struct page *page)
2555{
2556 return page->objects - page->inuse;
2557}
2558
David Rientjes9a02d692014-06-04 16:06:36 -07002559static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2560{
2561 return atomic_long_read(&n->total_objects);
2562}
2563#endif /* CONFIG_SLUB_DEBUG */
2564
2565#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002566static unsigned long count_partial(struct kmem_cache_node *n,
2567 int (*get_count)(struct page *))
2568{
2569 unsigned long flags;
2570 unsigned long x = 0;
2571 struct page *page;
2572
2573 spin_lock_irqsave(&n->list_lock, flags);
Tobin C. Harding916ac052019-05-13 17:16:12 -07002574 list_for_each_entry(page, &n->partial, slab_list)
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002575 x += get_count(page);
2576 spin_unlock_irqrestore(&n->list_lock, flags);
2577 return x;
2578}
David Rientjes9a02d692014-06-04 16:06:36 -07002579#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04002580
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002581static noinline void
2582slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2583{
David Rientjes9a02d692014-06-04 16:06:36 -07002584#ifdef CONFIG_SLUB_DEBUG
2585 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2586 DEFAULT_RATELIMIT_BURST);
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002587 int node;
Christoph Lameterfa45dc22014-08-06 16:04:09 -07002588 struct kmem_cache_node *n;
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002589
David Rientjes9a02d692014-06-04 16:06:36 -07002590 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2591 return;
2592
Vlastimil Babka5b3810e2016-03-15 14:56:33 -07002593 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2594 nid, gfpflags, &gfpflags);
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07002595 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
Fabian Frederickf9f58282014-06-04 16:06:34 -07002596 s->name, s->object_size, s->size, oo_order(s->oo),
2597 oo_order(s->min));
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002598
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002599 if (oo_order(s->min) > get_order(s->object_size))
Fabian Frederickf9f58282014-06-04 16:06:34 -07002600 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2601 s->name);
David Rientjesfa5ec8a2009-07-07 00:14:14 -07002602
Christoph Lameterfa45dc22014-08-06 16:04:09 -07002603 for_each_kmem_cache_node(s, node, n) {
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002604 unsigned long nr_slabs;
2605 unsigned long nr_objs;
2606 unsigned long nr_free;
2607
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04002608 nr_free = count_partial(n, count_free);
2609 nr_slabs = node_nr_slabs(n);
2610 nr_objs = node_nr_objs(n);
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002611
Fabian Frederickf9f58282014-06-04 16:06:34 -07002612 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002613 node, nr_slabs, nr_objs, nr_free);
2614 }
David Rientjes9a02d692014-06-04 16:06:36 -07002615#endif
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002616}
2617
Mel Gorman072bb0a2012-07-31 16:43:58 -07002618static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2619{
2620 if (unlikely(PageSlabPfmemalloc(page)))
2621 return gfp_pfmemalloc_allowed(gfpflags);
2622
2623 return true;
2624}
2625
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002626/*
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002627 * A variant of pfmemalloc_match() that tests page flags without asserting
2628 * PageSlab. Intended for opportunistic checks before taking a lock and
2629 * rechecking that nobody else freed the page under us.
2630 */
2631static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
2632{
2633 if (unlikely(__PageSlabPfmemalloc(page)))
2634 return gfp_pfmemalloc_allowed(gfpflags);
2635
2636 return true;
2637}
2638
2639/*
Chen Gangd0e0ac92013-07-15 09:05:29 +08002640 * Check the page->freelist of a page and either transfer the freelist to the
2641 * per cpu freelist or deactivate the page.
Christoph Lameter213eeb92011-11-11 14:07:14 -06002642 *
2643 * The page is still frozen if the return value is not NULL.
2644 *
2645 * If this function returns NULL then the page has been unfrozen.
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002646 *
2647 * This function must be called with interrupts disabled.
Christoph Lameter213eeb92011-11-11 14:07:14 -06002648 */
2649static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2650{
2651 struct page new;
2652 unsigned long counters;
2653 void *freelist;
2654
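	/*
	 * Atomically take the entire page freelist: mark all objects in use
	 * and keep the page frozen only if there was anything to take.
	 */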
2655 do {
2656 freelist = page->freelist;
2657 counters = page->counters;
Christoph Lameter6faa6832012-05-09 10:09:51 -05002658
Christoph Lameter213eeb92011-11-11 14:07:14 -06002659 new.counters = counters;
Dave Hansena0132ac2014-01-29 14:05:50 -08002660 VM_BUG_ON(!new.frozen);
Christoph Lameter213eeb92011-11-11 14:07:14 -06002661
2662 new.inuse = page->objects;
2663 new.frozen = freelist != NULL;
2664
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002665 } while (!__cmpxchg_double_slab(s, page,
Christoph Lameter213eeb92011-11-11 14:07:14 -06002666 freelist, counters,
2667 NULL, new.counters,
2668 "get_freelist"));
2669
2670 return freelist;
2671}
2672
2673/*
Christoph Lameter894b8782007-05-10 03:15:16 -07002674 * Slow path. The lockless freelist is empty or we need to perform
2675 * debugging duties.
Christoph Lameter81819f02007-05-06 14:49:36 -07002676 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002677 * Processing is still very fast if new objects have been freed to the
2678 * regular freelist. In that case we simply take over the regular freelist
2679 * as the lockless freelist and zap the regular freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07002680 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002681 * If that is not working then we fall back to the partial lists. We take the
2682 * first element of the freelist as the object to allocate now and move the
2683 * rest of the freelist to the lockless freelist.
2684 *
2685 * And if we were unable to get a new slab from the partial slab lists then
Christoph Lameter6446faa2008-02-15 23:45:26 -08002686 * we need to allocate a new slab. This is the slowest path since it involves
2687 * a call to the page allocator and the setup of a new slab.
Christoph Lametera380a3c2015-11-20 15:57:35 -08002688 *
Vlastimil Babkae5000592021-05-07 19:32:31 +02002689 * Version of __slab_alloc to use when we know that preemption is
Christoph Lametera380a3c2015-11-20 15:57:35 -08002690 * already disabled (which is the case for bulk allocation).
Christoph Lameter81819f02007-05-06 14:49:36 -07002691 */
Christoph Lametera380a3c2015-11-20 15:57:35 -08002692static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002693 unsigned long addr, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07002694{
Christoph Lameter6faa6832012-05-09 10:09:51 -05002695 void *freelist;
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002696 struct page *page;
Vlastimil Babkae5000592021-05-07 19:32:31 +02002697 unsigned long flags;
Christoph Lameter81819f02007-05-06 14:49:36 -07002698
Abel Wu9f986d92020-10-13 16:48:43 -07002699 stat(s, ALLOC_SLOWPATH);
2700
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002701reread_page:
2702
2703 page = READ_ONCE(c->page);
Vlastimil Babka0715e6c2020-03-21 18:22:37 -07002704 if (!page) {
2705 /*
2706 * if the node is not online or has no normal memory, just
2707 * ignore the node constraint
2708 */
2709 if (unlikely(node != NUMA_NO_NODE &&
Vlastimil Babka7e1fa932021-02-24 12:01:12 -08002710 !node_isset(node, slab_nodes)))
Vlastimil Babka0715e6c2020-03-21 18:22:37 -07002711 node = NUMA_NO_NODE;
Christoph Lameter81819f02007-05-06 14:49:36 -07002712 goto new_slab;
Vlastimil Babka0715e6c2020-03-21 18:22:37 -07002713 }
Christoph Lameter49e22582011-08-09 16:12:27 -05002714redo:
Christoph Lameter6faa6832012-05-09 10:09:51 -05002715
Christoph Lameter57d437d2012-05-09 10:09:59 -05002716 if (unlikely(!node_match(page, node))) {
Vlastimil Babka0715e6c2020-03-21 18:22:37 -07002717 /*
2718 * same as above but node_match() being false already
2719 * implies node != NUMA_NO_NODE
2720 */
Vlastimil Babka7e1fa932021-02-24 12:01:12 -08002721 if (!node_isset(node, slab_nodes)) {
Vlastimil Babka0715e6c2020-03-21 18:22:37 -07002722 node = NUMA_NO_NODE;
2723 goto redo;
2724 } else {
Joonsoo Kima561ce02014-10-09 15:26:15 -07002725 stat(s, ALLOC_NODE_MISMATCH);
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002726 goto deactivate_slab;
Joonsoo Kima561ce02014-10-09 15:26:15 -07002727 }
Christoph Lameterfc59c052011-06-01 12:25:56 -05002728 }
Christoph Lameter6446faa2008-02-15 23:45:26 -08002729
Mel Gorman072bb0a2012-07-31 16:43:58 -07002730 /*
2731 * By rights, we should be searching for a slab page that was
2732 * PFMEMALLOC but right now, we are losing the pfmemalloc
2733 * information when the page leaves the per-cpu allocator
2734 */
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002735 if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
2736 goto deactivate_slab;
Mel Gorman072bb0a2012-07-31 16:43:58 -07002737
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002738 /* must check again c->page in case IRQ handler changed it */
2739 local_irq_save(flags);
2740 if (unlikely(page != c->page)) {
2741 local_irq_restore(flags);
2742 goto reread_page;
2743 }
Christoph Lameter6faa6832012-05-09 10:09:51 -05002744 freelist = c->freelist;
2745 if (freelist)
Eric Dumazet73736e02011-12-13 04:57:06 +01002746 goto load_freelist;
2747
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002748 freelist = get_freelist(s, page);
Christoph Lameter6446faa2008-02-15 23:45:26 -08002749
Christoph Lameter6faa6832012-05-09 10:09:51 -05002750 if (!freelist) {
Christoph Lameter03e404a2011-06-01 12:25:58 -05002751 c->page = NULL;
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002752 local_irq_restore(flags);
Christoph Lameter03e404a2011-06-01 12:25:58 -05002753 stat(s, DEACTIVATE_BYPASS);
Christoph Lameterfc59c052011-06-01 12:25:56 -05002754 goto new_slab;
Christoph Lameter03e404a2011-06-01 12:25:58 -05002755 }
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002756
Christoph Lameter81819f02007-05-06 14:49:36 -07002757 stat(s, ALLOC_REFILL);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08002758
Christoph Lameter894b8782007-05-10 03:15:16 -07002759load_freelist:
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002760
2761 lockdep_assert_irqs_disabled();
2762
Christoph Lameter507effe2012-05-09 10:09:52 -05002763 /*
2764 * freelist is pointing to the list of objects to be used.
2765 * page is pointing to the page from which the objects are obtained.
2766 * That page must be frozen for per cpu allocations to work.
2767 */
Dave Hansena0132ac2014-01-29 14:05:50 -08002768 VM_BUG_ON(!c->page->frozen);
Christoph Lameter6faa6832012-05-09 10:09:51 -05002769 c->freelist = get_freepointer(s, freelist);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002770 c->tid = next_tid(c->tid);
Vlastimil Babkae5000592021-05-07 19:32:31 +02002771 local_irq_restore(flags);
Christoph Lameter6faa6832012-05-09 10:09:51 -05002772 return freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07002773
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002774deactivate_slab:
2775
2776 local_irq_save(flags);
2777 if (page != c->page) {
2778 local_irq_restore(flags);
2779 goto reread_page;
2780 }
Vlastimil Babkaa019d202021-05-12 13:53:34 +02002781 freelist = c->freelist;
2782 c->page = NULL;
2783 c->freelist = NULL;
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002784 local_irq_restore(flags);
Vlastimil Babkacfdf8362021-05-12 14:04:43 +02002785 deactivate_slab(s, page, freelist);
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002786
Christoph Lameter81819f02007-05-06 14:49:36 -07002787new_slab:
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002788
Wei Yanga93cf072017-07-06 15:36:31 -07002789 if (slub_percpu_partial(c)) {
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002790 local_irq_save(flags);
2791 if (unlikely(c->page)) {
2792 local_irq_restore(flags);
2793 goto reread_page;
2794 }
Vlastimil Babka4b1f4492021-05-11 17:45:26 +02002795 if (unlikely(!slub_percpu_partial(c))) {
2796 local_irq_restore(flags);
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002797 goto new_objects; /* stolen by an IRQ handler */
Vlastimil Babka4b1f4492021-05-11 17:45:26 +02002798 }
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002799
Wei Yanga93cf072017-07-06 15:36:31 -07002800 page = c->page = slub_percpu_partial(c);
2801 slub_set_percpu_partial(c, page);
Vlastimil Babka0b303fb2021-05-08 02:28:02 +02002802 local_irq_restore(flags);
Christoph Lameter49e22582011-08-09 16:12:27 -05002803 stat(s, CPU_PARTIAL_ALLOC);
Christoph Lameter49e22582011-08-09 16:12:27 -05002804 goto redo;
Christoph Lameter81819f02007-05-06 14:49:36 -07002805 }
2806
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002807new_objects:
2808
Vlastimil Babka75c8ff22021-05-11 14:05:22 +02002809 freelist = get_partial(s, gfpflags, node, &page);
Vlastimil Babka3f2b77e2021-05-11 16:37:51 +02002810 if (freelist)
Vlastimil Babka2a904902021-05-11 12:45:48 +02002811 goto check_new_page;
2812
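	/*
	 * Re-enable preemption around the (possibly sleeping) page allocator
	 * call; the cpu may change, so refetch the per-cpu pointer afterwards.
	 */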
Vlastimil Babkae5000592021-05-07 19:32:31 +02002813 put_cpu_ptr(s->cpu_slab);
Vlastimil Babka53a0de02021-05-11 13:01:34 +02002814 page = new_slab(s, gfpflags, node);
Vlastimil Babkae5000592021-05-07 19:32:31 +02002815 c = get_cpu_ptr(s->cpu_slab);
Christoph Lameterb811c202007-10-16 23:25:51 -07002816
Vlastimil Babka53a0de02021-05-11 13:01:34 +02002817 if (unlikely(!page)) {
David Rientjes9a02d692014-06-04 16:06:36 -07002818 slab_out_of_memory(s, gfpflags, node);
Christoph Lameterf46974362012-05-09 10:09:54 -05002819 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07002820 }
Christoph Lameter894b8782007-05-10 03:15:16 -07002821
Vlastimil Babka53a0de02021-05-11 13:01:34 +02002822 /*
2823 * No other reference to the page yet so we can
2824 * muck around with it freely without cmpxchg
2825 */
2826 freelist = page->freelist;
2827 page->freelist = NULL;
2828
2829 stat(s, ALLOC_SLAB);
Vlastimil Babka53a0de02021-05-11 13:01:34 +02002830
Vlastimil Babka2a904902021-05-11 12:45:48 +02002831check_new_page:
Christoph Lameter894b8782007-05-10 03:15:16 -07002832
Vlastimil Babka1572df72021-05-11 18:25:09 +02002833 if (kmem_cache_debug(s)) {
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002834 if (!alloc_debug_processing(s, page, freelist, addr)) {
Vlastimil Babka1572df72021-05-11 18:25:09 +02002835 /* Slab failed checks. Next slab needed */
2836 goto new_slab;
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002837 } else {
Vlastimil Babka1572df72021-05-11 18:25:09 +02002838 /*
2839 * For debug case, we don't load freelist so that all
2840 * allocations go through alloc_debug_processing()
2841 */
2842 goto return_single;
Vlastimil Babkafa417ab2021-05-10 13:56:17 +02002843 }
Vlastimil Babka1572df72021-05-11 18:25:09 +02002844 }
2845
2846 if (unlikely(!pfmemalloc_match(page, gfpflags)))
2847 /*
2848 * For !pfmemalloc_match() case we don't load freelist so that
2849 * we don't make further mismatched allocations easier.
2850 */
2851 goto return_single;
2852
Vlastimil Babkacfdf8362021-05-12 14:04:43 +02002853retry_load_page:
2854
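	/*
	 * An interrupt handler may have installed another slab in c->page
	 * meanwhile; flush it first, then try again to load ours.
	 */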
Vlastimil Babka9f101ee2021-05-11 16:56:09 +02002855 local_irq_save(flags);
Vlastimil Babkacfdf8362021-05-12 14:04:43 +02002856 if (unlikely(c->page)) {
2857 void *flush_freelist = c->freelist;
2858 struct page *flush_page = c->page;
2859
2860 c->page = NULL;
2861 c->freelist = NULL;
2862 c->tid = next_tid(c->tid);
2863
2864 local_irq_restore(flags);
2865
2866 deactivate_slab(s, flush_page, flush_freelist);
2867
2868 stat(s, CPUSLAB_FLUSH);
2869
2870 goto retry_load_page;
2871 }
Vlastimil Babka3f2b77e2021-05-11 16:37:51 +02002872 c->page = page;
2873
Vlastimil Babka1572df72021-05-11 18:25:09 +02002874 goto load_freelist;
2875
2876return_single:
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002877
Vlastimil Babkaa019d202021-05-12 13:53:34 +02002878 deactivate_slab(s, page, get_freepointer(s, freelist));
Christoph Lameter6faa6832012-05-09 10:09:51 -05002879 return freelist;
Christoph Lameter894b8782007-05-10 03:15:16 -07002880}
2881
2882/*
Vlastimil Babkae5000592021-05-07 19:32:31 +02002883 * A wrapper for ___slab_alloc() for contexts where preemption is not yet
2884 * disabled. Compensates for possible cpu changes by refetching the per cpu area
2885 * pointer.
Christoph Lametera380a3c2015-11-20 15:57:35 -08002886 */
2887static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2888 unsigned long addr, struct kmem_cache_cpu *c)
2889{
2890 void *p;
Christoph Lametera380a3c2015-11-20 15:57:35 -08002891
Vlastimil Babkae5000592021-05-07 19:32:31 +02002892#ifdef CONFIG_PREEMPT_COUNT
Christoph Lametera380a3c2015-11-20 15:57:35 -08002893 /*
2894 * We may have been preempted and rescheduled on a different
Vlastimil Babkae5000592021-05-07 19:32:31 +02002895 * cpu before disabling preemption. Need to reload cpu area
Christoph Lametera380a3c2015-11-20 15:57:35 -08002896 * pointer.
2897 */
Vlastimil Babkae5000592021-05-07 19:32:31 +02002898 c = get_cpu_ptr(s->cpu_slab);
Christoph Lametera380a3c2015-11-20 15:57:35 -08002899#endif
2900
2901 p = ___slab_alloc(s, gfpflags, node, addr, c);
Vlastimil Babkae5000592021-05-07 19:32:31 +02002902#ifdef CONFIG_PREEMPT_COUNT
2903 put_cpu_ptr(s->cpu_slab);
2904#endif
Christoph Lametera380a3c2015-11-20 15:57:35 -08002905 return p;
2906}
2907
2908/*
Alexander Potapenko0f181f92019-10-14 14:11:57 -07002909 * If the object has been wiped upon free, make sure it's fully initialized by
2910 * zeroing out the freelist pointer.
2911 */
2912static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
2913 void *obj)
2914{
2915 if (unlikely(slab_want_init_on_free(s)) && obj)
Andrey Konovalovce5716c2021-01-23 21:01:38 -08002916 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
2917 0, sizeof(void *));
Alexander Potapenko0f181f92019-10-14 14:11:57 -07002918}
2919
2920/*
Christoph Lameter894b8782007-05-10 03:15:16 -07002921 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2922 * have the fastpath folded into their functions. So no function call
2923 * overhead for requests that can be satisfied on the fastpath.
2924 *
2925 * The fastpath works by first checking if the lockless freelist can be used.
2926 * If not then __slab_alloc is called for slow processing.
2927 *
2928 * Otherwise we can simply pick the next object from the lockless free list.
2929 */
Ezequiel Garcia2b847c32012-09-08 17:47:58 -03002930static __always_inline void *slab_alloc_node(struct kmem_cache *s,
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08002931 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
Christoph Lameter894b8782007-05-10 03:15:16 -07002932{
Jesper Dangaard Brouer03ec0ed2015-11-20 15:57:52 -08002933 void *object;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002934 struct kmem_cache_cpu *c;
Christoph Lameter57d437d2012-05-09 10:09:59 -05002935 struct page *page;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002936 unsigned long tid;
Roman Gushchin964d4bd2020-08-06 23:20:56 -07002937 struct obj_cgroup *objcg = NULL;
Andrey Konovalovda844b72021-04-29 23:00:06 -07002938 bool init = false;
Christoph Lameter1f842602008-01-07 23:20:30 -08002939
Roman Gushchin964d4bd2020-08-06 23:20:56 -07002940 s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002941 if (!s)
Akinobu Mita773ff602008-12-23 19:37:01 +09002942 return NULL;
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08002943
2944 object = kfence_alloc(s, orig_size, gfpflags);
2945 if (unlikely(object))
2946 goto out;
2947
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002948redo:
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002949 /*
2950 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2951 * enabled. We may switch back and forth between cpus while
2952 * reading from one cpu area. That does not matter as long
2953 * as we end up on the original cpu again when doing the cmpxchg.
Christoph Lameter7cccd80b2013-01-23 21:45:48 +00002954 *
Vlastimil Babka9b4bc852021-05-18 02:01:39 +02002955 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
2956 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
2957 * the tid. If we are preempted and switched to another cpu between the
2958 * two reads, it's OK as the two are still associated with the same cpu
2959 * and cmpxchg later will validate the cpu.
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002960 */
Vlastimil Babka9b4bc852021-05-18 02:01:39 +02002961 c = raw_cpu_ptr(s->cpu_slab);
2962 tid = READ_ONCE(c->tid);
Joonsoo Kim9aabf812015-02-10 14:09:32 -08002963
2964 /*
2965 * Irqless object alloc/free algorithm used here depends on sequence
2966 * of fetching cpu_slab's data. tid should be fetched before anything
2967 * on c to guarantee that object and page associated with previous tid
2968 * won't be used with current tid. If we fetch tid first, object and
2969 * page could be the ones associated with the next tid and our alloc/free
2970 * request will fail. In this case, we will retry. So, no problem.
2971 */
2972 barrier();
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002973
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002974 /*
2975 * The transaction ids are globally unique per cpu and per operation on
2976 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2977 * occurs on the right processor and that there was no operation on the
2978 * linked list in between.
2979 */
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002980
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002981 object = c->freelist;
Christoph Lameter57d437d2012-05-09 10:09:59 -05002982 page = c->page;
Laurent Dufour22e46632020-11-13 22:51:53 -08002983 if (unlikely(!object || !page || !node_match(page, node))) {
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002984 object = __slab_alloc(s, gfpflags, node, addr, c);
Dave Hansen8eae1492014-06-04 16:06:37 -07002985 } else {
Eric Dumazet0ad95002011-12-16 16:25:34 +01002986 void *next_object = get_freepointer_safe(s, object);
2987
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002988 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002989 * The cmpxchg will only match if there was no additional
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002990 * operation and if we are on the right processor.
2991 *
Chen Gangd0e0ac92013-07-15 09:05:29 +08002992 * The cmpxchg does the following atomically (without lock
2993 * semantics!)
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002994 * 1. Relocate first pointer to the current per cpu area.
2995 * 2. Verify that tid and freelist have not been changed
2996 * 3. If they were not changed replace tid and freelist
2997 *
Chen Gangd0e0ac92013-07-15 09:05:29 +08002998 * Since this is without lock semantics the protection is only
2999 * against code executing on this cpu *not* from access by
3000 * other cpus.
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003001 */
Christoph Lameter933393f2011-12-22 11:58:51 -06003002 if (unlikely(!this_cpu_cmpxchg_double(
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003003 s->cpu_slab->freelist, s->cpu_slab->tid,
3004 object, tid,
Eric Dumazet0ad95002011-12-16 16:25:34 +01003005 next_object, next_tid(tid)))) {
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003006
3007 note_cmpxchg_failure("slab_alloc", s, tid);
3008 goto redo;
3009 }
Eric Dumazet0ad95002011-12-16 16:25:34 +01003010 prefetch_freepointer(s, next_object);
Christoph Lameter84e554e62009-12-18 16:26:23 -06003011 stat(s, ALLOC_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07003012 }
Alexander Potapenko0f181f92019-10-14 14:11:57 -07003013
Andrey Konovalovce5716c2021-01-23 21:01:38 -08003014 maybe_wipe_obj_freeptr(s, object);
Andrey Konovalovda844b72021-04-29 23:00:06 -07003015 init = slab_want_init_on_alloc(gfpflags, s);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003016
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003017out:
Andrey Konovalovda844b72021-04-29 23:00:06 -07003018 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
Vegard Nossum5a896d92008-04-04 00:54:48 +02003019
Christoph Lameter894b8782007-05-10 03:15:16 -07003020 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07003021}
3022
Ezequiel Garcia2b847c32012-09-08 17:47:58 -03003023static __always_inline void *slab_alloc(struct kmem_cache *s,
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003024 gfp_t gfpflags, unsigned long addr, size_t orig_size)
Ezequiel Garcia2b847c32012-09-08 17:47:58 -03003025{
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003026 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
Ezequiel Garcia2b847c32012-09-08 17:47:58 -03003027}
3028
Christoph Lameter81819f02007-05-06 14:49:36 -07003029void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
3030{
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003031 void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003032
Chen Gangd0e0ac92013-07-15 09:05:29 +08003033 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
3034 s->size, gfpflags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003035
3036 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003037}
3038EXPORT_SYMBOL(kmem_cache_alloc);
3039
Li Zefan0f24f122009-12-11 15:45:30 +08003040#ifdef CONFIG_TRACING
Richard Kennedy4a923792010-10-21 10:29:19 +01003041void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003042{
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003043 void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
Richard Kennedy4a923792010-10-21 10:29:19 +01003044 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
Andrey Konovalov01165232018-12-28 00:29:37 -08003045 ret = kasan_kmalloc(s, ret, size, gfpflags);
Richard Kennedy4a923792010-10-21 10:29:19 +01003046 return ret;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003047}
Richard Kennedy4a923792010-10-21 10:29:19 +01003048EXPORT_SYMBOL(kmem_cache_alloc_trace);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003049#endif
3050
Christoph Lameter81819f02007-05-06 14:49:36 -07003051#ifdef CONFIG_NUMA
3052void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
3053{
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003054 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003055
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003056 trace_kmem_cache_alloc_node(_RET_IP_, ret,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003057 s->object_size, s->size, gfpflags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003058
3059 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003060}
3061EXPORT_SYMBOL(kmem_cache_alloc_node);
Christoph Lameter81819f02007-05-06 14:49:36 -07003062
Li Zefan0f24f122009-12-11 15:45:30 +08003063#ifdef CONFIG_TRACING
Richard Kennedy4a923792010-10-21 10:29:19 +01003064void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003065 gfp_t gfpflags,
Richard Kennedy4a923792010-10-21 10:29:19 +01003066 int node, size_t size)
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003067{
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003068 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size);
Richard Kennedy4a923792010-10-21 10:29:19 +01003069
3070 trace_kmalloc_node(_RET_IP_, ret,
3071 size, s->size, gfpflags, node);
Andrey Ryabinin0316bec2015-02-13 14:39:42 -08003072
Andrey Konovalov01165232018-12-28 00:29:37 -08003073 ret = kasan_kmalloc(s, ret, size, gfpflags);
Richard Kennedy4a923792010-10-21 10:29:19 +01003074 return ret;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003075}
Richard Kennedy4a923792010-10-21 10:29:19 +01003076EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003077#endif
Tobin C. Harding6dfd1b62019-05-13 17:16:09 -07003078#endif /* CONFIG_NUMA */
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003079
Christoph Lameter81819f02007-05-06 14:49:36 -07003080/*
Kim Phillips94e4d712015-02-10 14:09:37 -08003081 * Slow path handling. This may still be called frequently since objects
Christoph Lameter894b8782007-05-10 03:15:16 -07003082 * have a longer lifetime than the cpu slabs in most processing loads.
Christoph Lameter81819f02007-05-06 14:49:36 -07003083 *
Christoph Lameter894b8782007-05-10 03:15:16 -07003084 * So we still attempt to reduce cache line usage. Just take the slab
3085 * lock and free the item. If there is no additional partial page
3086 * handling required then we can return immediately.
Christoph Lameter81819f02007-05-06 14:49:36 -07003087 */
Christoph Lameter894b8782007-05-10 03:15:16 -07003088static void __slab_free(struct kmem_cache *s, struct page *page,
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003089 void *head, void *tail, int cnt,
3090 unsigned long addr)
3091
Christoph Lameter81819f02007-05-06 14:49:36 -07003092{
3093 void *prior;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003094 int was_frozen;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003095 struct page new;
3096 unsigned long counters;
3097 struct kmem_cache_node *n = NULL;
Kees Cook3f649ab2020-06-03 13:09:38 -07003098 unsigned long flags;
Christoph Lameter81819f02007-05-06 14:49:36 -07003099
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003100 stat(s, FREE_SLOWPATH);
Christoph Lameter81819f02007-05-06 14:49:36 -07003101
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003102 if (kfence_free(head))
3103 return;
3104
Christoph Lameter19c7ff92012-05-30 12:54:46 -05003105 if (kmem_cache_debug(s) &&
Laura Abbott282acb42016-03-15 14:54:59 -07003106 !free_debug_processing(s, page, head, tail, cnt, addr))
Christoph Lameter80f08c12011-06-01 12:25:55 -05003107 return;
Christoph Lameter6446faa2008-02-15 23:45:26 -08003108
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003109 do {
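		/*
		 * If the previous iteration speculatively took n->list_lock but
		 * the cmpxchg below failed, drop the lock before retrying.
		 */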
Joonsoo Kim837d6782012-08-16 00:02:40 +09003110 if (unlikely(n)) {
3111 spin_unlock_irqrestore(&n->list_lock, flags);
3112 n = NULL;
3113 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003114 prior = page->freelist;
3115 counters = page->counters;
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003116 set_freepointer(s, tail, prior);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003117 new.counters = counters;
3118 was_frozen = new.frozen;
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003119 new.inuse -= cnt;
Joonsoo Kim837d6782012-08-16 00:02:40 +09003120 if ((!new.inuse || !prior) && !was_frozen) {
Christoph Lameter49e22582011-08-09 16:12:27 -05003121
Peter Zijlstrac65c1872014-01-10 13:23:49 +01003122 if (kmem_cache_has_cpu_partial(s) && !prior) {
Christoph Lameter49e22582011-08-09 16:12:27 -05003123
3124 /*
Chen Gangd0e0ac92013-07-15 09:05:29 +08003125 * Slab was on no list before and will be
3126 * partially empty.
3127 * We can defer the list move and instead
3128 * freeze it.
Christoph Lameter49e22582011-08-09 16:12:27 -05003129 */
3130 new.frozen = 1;
3131
Peter Zijlstrac65c1872014-01-10 13:23:49 +01003132 } else { /* Needs to be taken off a list */
Christoph Lameter49e22582011-08-09 16:12:27 -05003133
LQYMGTb455def2014-12-10 15:42:13 -08003134 n = get_node(s, page_to_nid(page));
Christoph Lameter49e22582011-08-09 16:12:27 -05003135 /*
3136 * Speculatively acquire the list_lock.
3137 * If the cmpxchg does not succeed then we may
3138 * drop the list_lock without any processing.
3139 *
3140 * Otherwise the list_lock will synchronize with
3141 * other processors updating the list of slabs.
3142 */
3143 spin_lock_irqsave(&n->list_lock, flags);
3144
3145 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003146 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003147
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003148 } while (!cmpxchg_double_slab(s, page,
3149 prior, counters,
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003150 head, new.counters,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003151 "__slab_free"));
Christoph Lameter81819f02007-05-06 14:49:36 -07003152
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003153 if (likely(!n)) {
Christoph Lameter49e22582011-08-09 16:12:27 -05003154
Abel Wuc270cf32020-10-13 16:48:40 -07003155 if (likely(was_frozen)) {
3156 /*
3157 * The list lock was not taken therefore no list
3158 * activity can be necessary.
3159 */
3160 stat(s, FREE_FROZEN);
3161 } else if (new.frozen) {
3162 /*
3163 * If we just froze the page then put it onto the
3164 * per cpu partial list.
3165 */
Christoph Lameter49e22582011-08-09 16:12:27 -05003166 put_cpu_partial(s, page, 1);
Alex Shi8028dce2012-02-03 23:34:56 +08003167 stat(s, CPU_PARTIAL_FREE);
3168 }
Abel Wuc270cf32020-10-13 16:48:40 -07003169
LQYMGTb455def2014-12-10 15:42:13 -08003170 return;
3171 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003172
Joonsoo Kim8a5b20a2014-07-02 15:22:35 -07003173 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
Joonsoo Kim837d6782012-08-16 00:02:40 +09003174 goto slab_empty;
Christoph Lameter81819f02007-05-06 14:49:36 -07003175
Joonsoo Kim837d6782012-08-16 00:02:40 +09003176 /*
3177 * Objects left in the slab. If it was not on the partial list before
3178 * then add it.
3179 */
Joonsoo Kim345c9052013-06-19 14:05:52 +09003180 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
Liu Xianga4d3f892019-05-13 17:16:22 -07003181 remove_full(s, n, page);
Joonsoo Kim837d6782012-08-16 00:02:40 +09003182 add_partial(n, page, DEACTIVATE_TO_TAIL);
3183 stat(s, FREE_ADD_PARTIAL);
Christoph Lameter81819f02007-05-06 14:49:36 -07003184 }
Christoph Lameter80f08c12011-06-01 12:25:55 -05003185 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07003186 return;
3187
3188slab_empty:
Christoph Lametera973e9d2008-03-01 13:40:44 -08003189 if (prior) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003190 /*
Christoph Lameter6fbabb22011-08-08 11:16:56 -05003191 * Slab on the partial list.
Christoph Lameter81819f02007-05-06 14:49:36 -07003192 */
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05003193 remove_partial(n, page);
Christoph Lameter84e554e62009-12-18 16:26:23 -06003194 stat(s, FREE_REMOVE_PARTIAL);
Peter Zijlstrac65c1872014-01-10 13:23:49 +01003195 } else {
Christoph Lameter6fbabb22011-08-08 11:16:56 -05003196 /* Slab must be on the full list */
Peter Zijlstrac65c1872014-01-10 13:23:49 +01003197 remove_full(s, n, page);
3198 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05003199
Christoph Lameter80f08c12011-06-01 12:25:55 -05003200 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter84e554e62009-12-18 16:26:23 -06003201 stat(s, FREE_SLAB);
Christoph Lameter81819f02007-05-06 14:49:36 -07003202 discard_slab(s, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07003203}
3204
Christoph Lameter894b8782007-05-10 03:15:16 -07003205/*
3206 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
3207 * can perform fastpath freeing without additional function calls.
3208 *
3209 * The fastpath is only possible if we are freeing to the current cpu slab
3210 * of this processor. This is typically the case if we have just allocated
3211 * the item before.
3212 *
3213 * If fastpath is not possible then fall back to __slab_free where we deal
3214 * with all sorts of special processing.
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003215 *
3216 * Bulk free of a freelist with several objects (all pointing to the
3217 * same page) is possible by specifying the head and tail pointers plus the
3218 * object count (cnt). Bulk free is indicated by the tail pointer being set.
Christoph Lameter894b8782007-05-10 03:15:16 -07003219 */
Alexander Potapenko80a92012016-07-28 15:49:07 -07003220static __always_inline void do_slab_free(struct kmem_cache *s,
3221 struct page *page, void *head, void *tail,
3222 int cnt, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07003223{
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003224 void *tail_obj = tail ? : head;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07003225 struct kmem_cache_cpu *c;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003226 unsigned long tid;
Roman Gushchin964d4bd2020-08-06 23:20:56 -07003227
Bharata B Raod1b2cf62020-10-13 16:53:09 -07003228 memcg_slab_free_hook(s, &head, 1);
Christoph Lametera24c5a02011-03-15 12:45:21 -05003229redo:
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003230 /*
3231 * Determine the current cpu's per cpu slab.
3232 * The cpu may change afterward. However that does not matter since
3233 * data is retrieved via this pointer. If we are on the same cpu
Jesper Dangaard Brouer2ae44002015-09-04 15:45:31 -07003234 * during the cmpxchg then the free will succeed.
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003235 */
Vlastimil Babka9b4bc852021-05-18 02:01:39 +02003236 c = raw_cpu_ptr(s->cpu_slab);
3237 tid = READ_ONCE(c->tid);
Christoph Lameterc016b0b2010-08-20 12:37:16 -05003238
Joonsoo Kim9aabf812015-02-10 14:09:32 -08003239 /* Same with comment on barrier() in slab_alloc_node() */
3240 barrier();
Christoph Lameterc016b0b2010-08-20 12:37:16 -05003241
Christoph Lameter442b06b2011-05-17 16:29:31 -05003242 if (likely(page == c->page)) {
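		/*
		 * Read c->freelist only once; the cmpxchg_double below
		 * revalidates it together with the tid.
		 */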
Linus Torvalds50761902020-03-17 11:04:09 -07003243 void **freelist = READ_ONCE(c->freelist);
3244
3245 set_freepointer(s, tail_obj, freelist);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003246
Christoph Lameter933393f2011-12-22 11:58:51 -06003247 if (unlikely(!this_cpu_cmpxchg_double(
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003248 s->cpu_slab->freelist, s->cpu_slab->tid,
Linus Torvalds50761902020-03-17 11:04:09 -07003249 freelist, tid,
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003250 head, next_tid(tid)))) {
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003251
3252 note_cmpxchg_failure("slab_free", s, tid);
3253 goto redo;
3254 }
Christoph Lameter84e554e62009-12-18 16:26:23 -06003255 stat(s, FREE_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07003256 } else
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003257 __slab_free(s, page, head, tail_obj, cnt, addr);
Christoph Lameter894b8782007-05-10 03:15:16 -07003258
Christoph Lameter894b8782007-05-10 03:15:16 -07003259}
3260
Alexander Potapenko80a92012016-07-28 15:49:07 -07003261static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
3262 void *head, void *tail, int cnt,
3263 unsigned long addr)
3264{
Alexander Potapenko80a92012016-07-28 15:49:07 -07003265 /*
Andrey Konovalovc3895392018-04-10 16:30:31 -07003266 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3267 * to remove objects, whose reuse must be delayed.
Alexander Potapenko80a92012016-07-28 15:49:07 -07003268 */
Andrey Konovalovc3895392018-04-10 16:30:31 -07003269 if (slab_free_freelist_hook(s, &head, &tail))
3270 do_slab_free(s, page, head, tail, cnt, addr);
Alexander Potapenko80a92012016-07-28 15:49:07 -07003271}
3272
Andrey Konovalov2bd926b2018-12-28 00:29:53 -08003273#ifdef CONFIG_KASAN_GENERIC
Alexander Potapenko80a92012016-07-28 15:49:07 -07003274void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3275{
3276 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3277}
3278#endif
3279
Christoph Lameter81819f02007-05-06 14:49:36 -07003280void kmem_cache_free(struct kmem_cache *s, void *x)
3281{
Glauber Costab9ce5ef2012-12-18 14:22:46 -08003282 s = cache_from_obj(s, x);
3283 if (!s)
Christoph Lameter79576102012-09-04 23:06:14 +00003284 return;
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08003285 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
Jacob Wen3544de8e2021-02-24 12:00:55 -08003286 trace_kmem_cache_free(_RET_IP_, x, s->name);
Christoph Lameter81819f02007-05-06 14:49:36 -07003287}
3288EXPORT_SYMBOL(kmem_cache_free);
3289
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003290struct detached_freelist {
3291 struct page *page;
3292 void *tail;
3293 void *freelist;
3294 int cnt;
Jesper Dangaard Brouer376bf122016-03-15 14:53:32 -07003295 struct kmem_cache *s;
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003296};
3297
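/*
 * Free a compound page that backs a large kmalloc() allocation which
 * bypassed the slab (no slab metadata involved).
 */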
Shakeel Butt1ed7ce52021-08-13 16:54:31 -07003298static inline void free_nonslab_page(struct page *page, void *object)
Shakeel Buttf227f0f2021-07-29 14:53:50 -07003299{
3300 unsigned int order = compound_order(page);
3301
3302 VM_BUG_ON_PAGE(!PageCompound(page), page);
Shakeel Butt1ed7ce52021-08-13 16:54:31 -07003303 kfree_hook(object);
Shakeel Buttf227f0f2021-07-29 14:53:50 -07003304 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
3305 __free_pages(page, order);
3306}
3307
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003308/*
3309 * This function progressively scans the array with free objects (with
3310 * a limited look ahead) and extract objects belonging to the same
3311 * page. It builds a detached freelist directly within the given
3312 * page/objects. This can happen without any need for
3313 * synchronization, because the objects are owned by the running process.
3314 * The freelist is built up as a singly linked list in the objects.
3315 * The idea is that this detached freelist can then be bulk
3316 * transferred to the real freelist(s), but only requiring a single
3317 * synchronization primitive. Look ahead in the array is limited due
3318 * to performance reasons.
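 *
 * Illustrative example (not from the original source): for
 * p[] = { A1, B1, A2, A3, B2 } where An/Bn live on pages A and B, the
 * first call starts from B2 at the array tail, picks up B1 within the
 * lookahead window, builds one detached list (cnt == 2), NULLs those
 * slots and returns an index so the next call handles the A objects.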
3319 */
Jesper Dangaard Brouer376bf122016-03-15 14:53:32 -07003320static inline
3321int build_detached_freelist(struct kmem_cache *s, size_t size,
3322 void **p, struct detached_freelist *df)
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003323{
3324 size_t first_skipped_index = 0;
3325 int lookahead = 3;
3326 void *object;
Jesper Dangaard Brouerca257192016-03-15 14:54:00 -07003327 struct page *page;
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003328
3329 /* Always re-init detached_freelist */
3330 df->page = NULL;
3331
3332 do {
3333 object = p[--size];
Jesper Dangaard Brouerca257192016-03-15 14:54:00 -07003334 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003335 } while (!object && size);
3336
3337 if (!object)
3338 return 0;
3339
Jesper Dangaard Brouerca257192016-03-15 14:54:00 -07003340 page = virt_to_head_page(object);
3341 if (!s) {
3342 /* Handle kmalloc'ed objects */
3343 if (unlikely(!PageSlab(page))) {
Shakeel Butt1ed7ce52021-08-13 16:54:31 -07003344 free_nonslab_page(page, object);
Jesper Dangaard Brouerca257192016-03-15 14:54:00 -07003345 p[size] = NULL; /* mark object processed */
3346 return size;
3347 }
3348 /* Derive kmem_cache from object */
3349 df->s = page->slab_cache;
3350 } else {
3351 df->s = cache_from_obj(s, object); /* Support for memcg */
3352 }
Jesper Dangaard Brouer376bf122016-03-15 14:53:32 -07003353
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003354 if (is_kfence_address(object)) {
Andrey Konovalovd57a9642021-04-29 23:00:09 -07003355 slab_free_hook(df->s, object, false);
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003356 __kfence_free(object);
3357 p[size] = NULL; /* mark object processed */
3358 return size;
3359 }
3360
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003361 /* Start new detached freelist */
Jesper Dangaard Brouerca257192016-03-15 14:54:00 -07003362 df->page = page;
Jesper Dangaard Brouer376bf122016-03-15 14:53:32 -07003363 set_freepointer(df->s, object, NULL);
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003364 df->tail = object;
3365 df->freelist = object;
3366 p[size] = NULL; /* mark object processed */
3367 df->cnt = 1;
3368
3369 while (size) {
3370 object = p[--size];
3371 if (!object)
3372 continue; /* Skip processed objects */
3373
3374 /* df->page is always set at this point */
3375 if (df->page == virt_to_head_page(object)) {
3376 /* Opportunistically build the freelist */
Jesper Dangaard Brouer376bf122016-03-15 14:53:32 -07003377 set_freepointer(df->s, object, df->freelist);
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003378 df->freelist = object;
3379 df->cnt++;
3380 p[size] = NULL; /* mark object processed */
3381
3382 continue;
3383 }
3384
3385 /* Limit look ahead search */
3386 if (!--lookahead)
3387 break;
3388
3389 if (!first_skipped_index)
3390 first_skipped_index = size + 1;
3391 }
3392
3393 return first_skipped_index;
3394}
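
/*
 * A minimal userspace sketch of the grouping behaviour implemented above
 * (illustrative only, with invented names; it is not part of this file and
 * is never compiled): trailing array entries that share the page of the
 * last element are linked into one list, entries from other pages are left
 * for a later call, and scanning stops after a bounded number of misses.
 */
#if 0
#include <stddef.h>

struct demo_obj { int page_id; struct demo_obj *next; };

static size_t demo_detach(struct demo_obj **p, size_t size,
			  struct demo_obj **head, size_t *cnt)
{
	size_t first_skipped = 0;
	int lookahead = 3;
	struct demo_obj *obj = NULL;
	int page;

	*head = NULL;
	*cnt = 0;

	/* Skip slots already NULLed by a previous call. */
	while (size && !(obj = p[--size]))
		;
	if (!obj)
		return 0;

	page = obj->page_id;
	obj->next = NULL;
	*head = obj;
	*cnt = 1;
	p[size] = NULL;

	while (size) {
		obj = p[--size];
		if (!obj)
			continue;
		if (obj->page_id == page) {
			obj->next = *head;	/* extend the detached list */
			*head = obj;
			(*cnt)++;
			p[size] = NULL;
			continue;
		}
		if (!--lookahead)
			break;			/* bounded look ahead */
		if (!first_skipped)
			first_skipped = size + 1;
	}
	return first_skipped;
}
#endif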
3395
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003396/* Note that interrupts must be enabled when calling this function. */
Jesper Dangaard Brouer376bf122016-03-15 14:53:32 -07003397void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
Christoph Lameter484748f2015-09-04 15:45:34 -07003398{
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003399 if (WARN_ON(!size))
3400 return;
Jesper Dangaard Brouerfbd02632015-09-04 15:45:43 -07003401
Bharata B Raod1b2cf62020-10-13 16:53:09 -07003402 memcg_slab_free_hook(s, p, size);
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003403 do {
3404 struct detached_freelist df;
Jesper Dangaard Brouerfbd02632015-09-04 15:45:43 -07003405
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003406 size = build_detached_freelist(s, size, p, &df);
Arnd Bergmann84582c82016-12-12 16:41:35 -08003407 if (!df.page)
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003408 continue;
Jesper Dangaard Brouerfbd02632015-09-04 15:45:43 -07003409
Zhiyuan Dai457c82c2021-02-24 12:01:26 -08003410 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
Jesper Dangaard Brouerd0ecd892015-11-20 15:57:49 -08003411 } while (likely(size));
Christoph Lameter484748f2015-09-04 15:45:34 -07003412}
3413EXPORT_SYMBOL(kmem_cache_free_bulk);
3414
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003415/* Note that interrupts must be enabled when calling this function. */
Jesper Dangaard Brouer865762a2015-11-20 15:57:58 -08003416int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3417 void **p)
Christoph Lameter484748f2015-09-04 15:45:34 -07003418{
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003419 struct kmem_cache_cpu *c;
3420 int i;
Roman Gushchin964d4bd2020-08-06 23:20:56 -07003421 struct obj_cgroup *objcg = NULL;
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003422
Jesper Dangaard Brouer03ec0ed2015-11-20 15:57:52 -08003423 /* memcg and kmem_cache debug support */
Roman Gushchin964d4bd2020-08-06 23:20:56 -07003424 s = slab_pre_alloc_hook(s, &objcg, size, flags);
Jesper Dangaard Brouer03ec0ed2015-11-20 15:57:52 -08003425 if (unlikely(!s))
3426 return false;
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003427 /*
3428 * Drain objects in the per cpu slab, while disabling local
3429 * IRQs, which protects against PREEMPT and interrupts
3430 * handlers invoking normal fastpath.
3431 */
Vlastimil Babkae5000592021-05-07 19:32:31 +02003432 c = get_cpu_ptr(s->cpu_slab);
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003433 local_irq_disable();
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003434
3435 for (i = 0; i < size; i++) {
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003436 void *object = kfence_alloc(s, s->object_size, flags);
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003437
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08003438 if (unlikely(object)) {
3439 p[i] = object;
3440 continue;
3441 }
3442
3443 object = c->freelist;
Jesper Dangaard Brouerebe909e2015-09-04 15:45:40 -07003444 if (unlikely(!object)) {
Jesper Dangaard Brouerebe909e2015-09-04 15:45:40 -07003445 /*
Jann Hornfd4d9c72020-03-17 01:28:45 +01003446 * We may have removed an object from c->freelist using
3447 * the fastpath in the previous iteration; in that case,
3448 * c->tid has not been bumped yet.
3449 * Since ___slab_alloc() may reenable interrupts while
3450 * allocating memory, we should bump c->tid now.
3451 */
3452 c->tid = next_tid(c->tid);
3453
Vlastimil Babkae5000592021-05-07 19:32:31 +02003454 local_irq_enable();
3455
Jann Hornfd4d9c72020-03-17 01:28:45 +01003456 /*
Jesper Dangaard Brouerebe909e2015-09-04 15:45:40 -07003457 * Invoking the slow path likely has the side effect
3458 * of re-populating the per-CPU c->freelist
3459 */
Christoph Lameter87098372015-11-20 15:57:38 -08003460 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
Jesper Dangaard Brouerebe909e2015-09-04 15:45:40 -07003461 _RET_IP_, c);
Christoph Lameter87098372015-11-20 15:57:38 -08003462 if (unlikely(!p[i]))
3463 goto error;
3464
Jesper Dangaard Brouerebe909e2015-09-04 15:45:40 -07003465 c = this_cpu_ptr(s->cpu_slab);
Alexander Potapenko0f181f92019-10-14 14:11:57 -07003466 maybe_wipe_obj_freeptr(s, p[i]);
3467
Vlastimil Babkae5000592021-05-07 19:32:31 +02003468 local_irq_disable();
3469
Jesper Dangaard Brouerebe909e2015-09-04 15:45:40 -07003470 continue; /* goto for-loop */
3471 }
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003472 c->freelist = get_freepointer(s, object);
3473 p[i] = object;
Alexander Potapenko0f181f92019-10-14 14:11:57 -07003474 maybe_wipe_obj_freeptr(s, p[i]);
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003475 }
3476 c->tid = next_tid(c->tid);
3477 local_irq_enable();
Vlastimil Babkae5000592021-05-07 19:32:31 +02003478 put_cpu_ptr(s->cpu_slab);
Jesper Dangaard Brouer994eb762015-09-04 15:45:37 -07003479
Andrey Konovalovda844b72021-04-29 23:00:06 -07003480 /*
3481 * memcg and kmem_cache debug support and memory initialization.
3482 * Done outside of the IRQ disabled fastpath loop.
3483 */
3484 slab_post_alloc_hook(s, objcg, flags, size, p,
3485 slab_want_init_on_alloc(flags, s));
Jesper Dangaard Brouer865762a2015-11-20 15:57:58 -08003486 return i;
Christoph Lameter87098372015-11-20 15:57:38 -08003487error:
Vlastimil Babkae5000592021-05-07 19:32:31 +02003488 put_cpu_ptr(s->cpu_slab);
Andrey Konovalovda844b72021-04-29 23:00:06 -07003489 slab_post_alloc_hook(s, objcg, flags, i, p, false);
Jesper Dangaard Brouer03ec0ed2015-11-20 15:57:52 -08003490 __kmem_cache_free_bulk(s, i, p);
Jesper Dangaard Brouer865762a2015-11-20 15:57:58 -08003491 return 0;
Christoph Lameter484748f2015-09-04 15:45:34 -07003492}
3493EXPORT_SYMBOL(kmem_cache_alloc_bulk);
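
/*
 * A usage sketch for the two bulk entry points above. The caller and cache
 * here are hypothetical and the block is not compiled; it only illustrates
 * the intended pattern: allocate a batch in one call, then return the whole
 * batch in one call so the free side can batch per-page work.
 */
#if 0
static int demo_bulk_roundtrip(void)
{
	struct kmem_cache *cache;
	void *objs[16];
	int n;

	cache = kmem_cache_create("demo_bulk", 64, 0, 0, NULL);
	if (!cache)
		return -ENOMEM;

	/* Returns the number of objects actually allocated, 0 on failure. */
	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!n) {
		kmem_cache_destroy(cache);
		return -ENOMEM;
	}

	/* ... use objs[0..n-1] ... */

	/* Interrupts must be enabled here, as noted above. */
	kmem_cache_free_bulk(cache, n, objs);
	kmem_cache_destroy(cache);
	return 0;
}
#endif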
3494
3495
Christoph Lameter81819f02007-05-06 14:49:36 -07003496/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003497 * Object placement in a slab is made very easy because we always start at
3498 * offset 0. If we tune the size of the object to the alignment then we can
3499 * get the required alignment by putting one properly sized object after
3500 * another.
Christoph Lameter81819f02007-05-06 14:49:36 -07003501 *
3502 * Notice that the allocation order determines the sizes of the per cpu
3503 * caches. Each processor always has one slab available for allocations.
3504 * Increasing the allocation order reduces the number of times that slabs
Christoph Lameter672bba32007-05-09 02:32:39 -07003505 * must be moved on and off the partial lists and is therefore a factor in
Christoph Lameter81819f02007-05-06 14:49:36 -07003506 * locking overhead.
Christoph Lameter81819f02007-05-06 14:49:36 -07003507 */
3508
3509/*
Ingo Molnarf0953a12021-05-06 18:06:47 -07003510 * Minimum / Maximum order of slab pages. This influences locking overhead
Christoph Lameter81819f02007-05-06 14:49:36 -07003511 * and slab fragmentation. A higher order reduces the number of partial slabs
3512 * and increases the number of allocations possible without having to
3513 * take the list_lock.
3514 */
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07003515static unsigned int slub_min_order;
3516static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3517static unsigned int slub_min_objects;
Christoph Lameter81819f02007-05-06 14:49:36 -07003518
3519/*
Christoph Lameter81819f02007-05-06 14:49:36 -07003520 * Calculate the order of allocation given a slab object size.
3521 *
Christoph Lameter672bba32007-05-09 02:32:39 -07003522 * The order of allocation has significant impact on performance and other
3523 * system components. Generally order 0 allocations should be preferred since
3524 * order 0 does not cause fragmentation in the page allocator. Larger objects
3525 * can be problematic to put into order 0 slabs because there may be too much
Christoph Lameterc124f5b2008-04-14 19:13:29 +03003526 * unused space left. We go to a higher order if more than 1/16th of the slab
Christoph Lameter672bba32007-05-09 02:32:39 -07003527 * would be wasted.
Christoph Lameter81819f02007-05-06 14:49:36 -07003528 *
Christoph Lameter672bba32007-05-09 02:32:39 -07003529 * In order to reach satisfactory performance we must ensure that a minimum
3530 * number of objects is in one slab. Otherwise we may generate too much
3531 * activity on the partial lists which requires taking the list_lock. This is
3532 * less a concern for large slabs though which are rarely used.
Christoph Lameter81819f02007-05-06 14:49:36 -07003533 *
Christoph Lameter672bba32007-05-09 02:32:39 -07003534 * slub_max_order specifies the order where we begin to stop considering the
3535 * number of objects in a slab as critical. If we reach slub_max_order then
3536 * we try to keep the page order as low as possible. So we accept more waste
3537 * of space in favor of a small page order.
3538 *
3539 * Higher order allocations also allow the placement of more objects in a
3540 * slab and thereby reduce object handling overhead. If the user has
Bhaskar Chowdhurydc842072021-04-29 22:54:51 -07003541 * requested a higher minimum order then we start with that one instead of
Christoph Lameter672bba32007-05-09 02:32:39 -07003542 * the smallest order which will fit the object.
Christoph Lameter81819f02007-05-06 14:49:36 -07003543 */
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07003544static inline unsigned int slab_order(unsigned int size,
3545 unsigned int min_objects, unsigned int max_order,
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003546 unsigned int fract_leftover)
Christoph Lameter81819f02007-05-06 14:49:36 -07003547{
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07003548 unsigned int min_order = slub_min_order;
3549 unsigned int order;
Christoph Lameter81819f02007-05-06 14:49:36 -07003550
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003551 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +04003552 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
Christoph Lameter39b26462008-04-14 19:11:30 +03003553
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003554 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003555 order <= max_order; order++) {
3556
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07003557 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3558 unsigned int rem;
Christoph Lameter81819f02007-05-06 14:49:36 -07003559
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003560 rem = slab_size % size;
Christoph Lameter81819f02007-05-06 14:49:36 -07003561
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003562 if (rem <= slab_size / fract_leftover)
Christoph Lameter81819f02007-05-06 14:49:36 -07003563 break;
Christoph Lameter81819f02007-05-06 14:49:36 -07003564 }
Christoph Lameter672bba32007-05-09 02:32:39 -07003565
Christoph Lameter81819f02007-05-06 14:49:36 -07003566 return order;
3567}
3568
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003569static inline int calculate_order(unsigned int size)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003570{
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07003571 unsigned int order;
3572 unsigned int min_objects;
3573 unsigned int max_objects;
Vlastimil Babka32862222021-02-09 13:42:32 -08003574 unsigned int nr_cpus;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003575
3576 /*
3577 * Attempt to find best configuration for a slab. This
3578 * works by first attempting to generate a layout with
3579 * the best configuration and backing off gradually.
3580 *
Wei Yang422ff4d2015-11-05 18:45:46 -08003581 * First we increase the acceptable waste in a slab. Then
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003582 * we reduce the minimum objects required in a slab.
3583 */
3584 min_objects = slub_min_objects;
Vlastimil Babka32862222021-02-09 13:42:32 -08003585 if (!min_objects) {
3586 /*
3587 * Some architectures will only update present cpus when
3588 * onlining them, so don't trust the number if it's just 1. But
3589 * we also don't want to use nr_cpu_ids always, as on some other
3590 * architectures, there can be many possible cpus, but never
3591 * onlined. Here we compromise between trying to avoid too high
3592 * order on systems that appear larger than they are, and too
3593 * low order on systems that appear smaller than they are.
3594 */
3595 nr_cpus = num_present_cpus();
3596 if (nr_cpus <= 1)
3597 nr_cpus = nr_cpu_ids;
3598 min_objects = 4 * (fls(nr_cpus) + 1);
3599 }
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003600 max_objects = order_objects(slub_max_order, size);
Zhang Yanmine8120ff2009-02-12 18:00:17 +02003601 min_objects = min(min_objects, max_objects);
3602
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003603 while (min_objects > 1) {
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07003604 unsigned int fraction;
3605
Christoph Lameterc124f5b2008-04-14 19:13:29 +03003606 fraction = 16;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003607 while (fraction >= 4) {
3608 order = slab_order(size, min_objects,
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003609 slub_max_order, fraction);
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003610 if (order <= slub_max_order)
3611 return order;
3612 fraction /= 2;
3613 }
Amerigo Wang5086c389c2009-08-19 21:44:13 +03003614 min_objects--;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003615 }
3616
3617 /*
3618 * We were unable to place multiple objects in a slab. Now
3619 * let's see if we can place a single object there.
3620 */
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003621 order = slab_order(size, 1, slub_max_order, 1);
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003622 if (order <= slub_max_order)
3623 return order;
3624
3625 /*
3626 * Doh, this slab cannot be placed using slub_max_order.
3627 */
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003628 order = slab_order(size, 1, MAX_ORDER, 1);
David Rientjes818cf592009-04-23 09:58:22 +03003629 if (order < MAX_ORDER)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07003630 return order;
3631 return -ENOSYS;
3632}
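
/*
 * A worked example of the waste test used by slab_order(), as a standalone
 * userspace sketch (assumes 4 KiB pages and the initial 1/16 waste limit,
 * and ignores the min_objects-driven starting order); not part of this file.
 */
#if 0
#include <stdio.h>

/* Leftover bytes in an order-'order' slab packed with 'size'-byte objects. */
static unsigned int demo_leftover(unsigned int order, unsigned int size)
{
	unsigned int slab_size = 4096u << order;

	return slab_size % size;
}

int main(void)
{
	/*
	 * For 704-byte objects:
	 *   order 0: 4096 % 704 = 576 wasted  > 4096/16 = 256 -> rejected
	 *   order 1: 8192 % 704 = 448 wasted <= 8192/16 = 512 -> accepted
	 */
	printf("order 0 leftover: %u\n", demo_leftover(0, 704));
	printf("order 1 leftover: %u\n", demo_leftover(1, 704));
	return 0;
}
#endif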
3633
Pekka Enberg5595cff2008-08-05 09:28:47 +03003634static void
Joonsoo Kim40534972012-05-11 00:50:47 +09003635init_kmem_cache_node(struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07003636{
3637 n->nr_partial = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07003638 spin_lock_init(&n->list_lock);
3639 INIT_LIST_HEAD(&n->partial);
Christoph Lameter8ab13722007-07-17 04:03:32 -07003640#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter0f389ec2008-04-14 18:53:02 +03003641 atomic_long_set(&n->nr_slabs, 0);
Salman Qazi02b71b72008-09-11 12:25:41 -07003642 atomic_long_set(&n->total_objects, 0);
Christoph Lameter643b1132007-05-06 14:49:42 -07003643 INIT_LIST_HEAD(&n->full);
Christoph Lameter8ab13722007-07-17 04:03:32 -07003644#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07003645}
3646
Christoph Lameter55136592010-08-20 12:37:13 -05003647static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
Christoph Lameter4c93c3552007-10-16 01:26:08 -07003648{
Christoph Lameter6c182dc2010-08-20 12:37:14 -05003649 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
Christoph Lameter95a05b42013-01-10 19:14:19 +00003650 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06003651
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003652 /*
Chris Metcalfd4d84fe2011-06-02 10:19:41 -04003653 * Must align to double word boundary for the double cmpxchg
3654 * instructions to work; see __pcpu_double_call_return_bool().
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003655 */
Chris Metcalfd4d84fe2011-06-02 10:19:41 -04003656 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3657 2 * sizeof(void *));
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06003658
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06003659 if (!s->cpu_slab)
3660 return 0;
3661
3662 init_kmem_cache_cpus(s);
3663
3664 return 1;
Christoph Lameter4c93c3552007-10-16 01:26:08 -07003665}
Christoph Lameter4c93c3552007-10-16 01:26:08 -07003666
Christoph Lameter51df1142010-08-20 12:37:15 -05003667static struct kmem_cache *kmem_cache_node;
3668
Christoph Lameter81819f02007-05-06 14:49:36 -07003669/*
3670 * No kmalloc_node yet so do it by hand. We know that this is the first
3671 * slab on the node for this slabcache. There are no concurrent accesses
3672 * possible.
3673 *
Zhi Yong Wu721ae222013-11-08 20:47:37 +08003674 * Note that this function only works on the kmem_cache_node
3675 * when allocating for the kmem_cache_node. This is used for bootstrapping
Christoph Lameter4c93c3552007-10-16 01:26:08 -07003676 * memory on a fresh node that has no slab structures yet.
Christoph Lameter81819f02007-05-06 14:49:36 -07003677 */
Christoph Lameter55136592010-08-20 12:37:13 -05003678static void early_kmem_cache_node_alloc(int node)
Christoph Lameter81819f02007-05-06 14:49:36 -07003679{
3680 struct page *page;
3681 struct kmem_cache_node *n;
3682
Christoph Lameter51df1142010-08-20 12:37:15 -05003683 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
Christoph Lameter81819f02007-05-06 14:49:36 -07003684
Christoph Lameter51df1142010-08-20 12:37:15 -05003685 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07003686
3687 BUG_ON(!page);
Christoph Lametera2f92ee2007-08-22 14:01:57 -07003688 if (page_to_nid(page) != node) {
Fabian Frederickf9f58282014-06-04 16:06:34 -07003689 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3690 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
Christoph Lametera2f92ee2007-08-22 14:01:57 -07003691 }
3692
Christoph Lameter81819f02007-05-06 14:49:36 -07003693 n = page->freelist;
3694 BUG_ON(!n);
Christoph Lameter8ab13722007-07-17 04:03:32 -07003695#ifdef CONFIG_SLUB_DEBUG
Christoph Lameterf7cb1932010-09-29 07:15:01 -05003696 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
Christoph Lameter51df1142010-08-20 12:37:15 -05003697 init_tracking(kmem_cache_node, n);
Christoph Lameter8ab13722007-07-17 04:03:32 -07003698#endif
Andrey Konovalovda844b72021-04-29 23:00:06 -07003699 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
Andrey Konovalov12b22382018-12-28 00:29:41 -08003700 page->freelist = get_freepointer(kmem_cache_node, n);
3701 page->inuse = 1;
3702 page->frozen = 0;
3703 kmem_cache_node->node[node] = n;
Joonsoo Kim40534972012-05-11 00:50:47 +09003704 init_kmem_cache_node(n);
Christoph Lameter51df1142010-08-20 12:37:15 -05003705 inc_slabs_node(kmem_cache_node, node, page->objects);
Christoph Lameter6446faa2008-02-15 23:45:26 -08003706
Dave Hansen67b6c902014-01-24 07:20:23 -08003707 /*
Steven Rostedt1e4dd942014-02-10 14:25:46 -08003708 * No locks need to be taken here as it has just been
3709 * initialized and there is no concurrent access.
Dave Hansen67b6c902014-01-24 07:20:23 -08003710 */
Steven Rostedt1e4dd942014-02-10 14:25:46 -08003711 __add_partial(n, page, DEACTIVATE_TO_HEAD);
Christoph Lameter81819f02007-05-06 14:49:36 -07003712}
3713
3714static void free_kmem_cache_nodes(struct kmem_cache *s)
3715{
3716 int node;
Christoph Lameterfa45dc22014-08-06 16:04:09 -07003717 struct kmem_cache_node *n;
Christoph Lameter81819f02007-05-06 14:49:36 -07003718
Christoph Lameterfa45dc22014-08-06 16:04:09 -07003719 for_each_kmem_cache_node(s, node, n) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003720 s->node[node] = NULL;
Alexander Potapenkoea37df52017-09-06 16:19:15 -07003721 kmem_cache_free(kmem_cache_node, n);
Christoph Lameter81819f02007-05-06 14:49:36 -07003722 }
3723}
3724
Dmitry Safonov52b4b952016-02-17 13:11:37 -08003725void __kmem_cache_release(struct kmem_cache *s)
3726{
Thomas Garnier210e7a42016-07-26 15:21:59 -07003727 cache_random_seq_destroy(s);
Dmitry Safonov52b4b952016-02-17 13:11:37 -08003728 free_percpu(s->cpu_slab);
3729 free_kmem_cache_nodes(s);
3730}
3731
Christoph Lameter55136592010-08-20 12:37:13 -05003732static int init_kmem_cache_nodes(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07003733{
3734 int node;
Christoph Lameter81819f02007-05-06 14:49:36 -07003735
Vlastimil Babka7e1fa932021-02-24 12:01:12 -08003736 for_each_node_mask(node, slab_nodes) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003737 struct kmem_cache_node *n;
3738
Alexander Duyck73367bd2010-05-21 14:41:35 -07003739 if (slab_state == DOWN) {
Christoph Lameter55136592010-08-20 12:37:13 -05003740 early_kmem_cache_node_alloc(node);
Alexander Duyck73367bd2010-05-21 14:41:35 -07003741 continue;
Christoph Lameter81819f02007-05-06 14:49:36 -07003742 }
Christoph Lameter51df1142010-08-20 12:37:15 -05003743 n = kmem_cache_alloc_node(kmem_cache_node,
Christoph Lameter55136592010-08-20 12:37:13 -05003744 GFP_KERNEL, node);
Alexander Duyck73367bd2010-05-21 14:41:35 -07003745
3746 if (!n) {
3747 free_kmem_cache_nodes(s);
3748 return 0;
3749 }
3750
Joonsoo Kim40534972012-05-11 00:50:47 +09003751 init_kmem_cache_node(n);
Alexander Potapenkoea37df52017-09-06 16:19:15 -07003752 s->node[node] = n;
Christoph Lameter81819f02007-05-06 14:49:36 -07003753 }
3754 return 1;
3755}
Christoph Lameter81819f02007-05-06 14:49:36 -07003756
David Rientjesc0bdb232009-02-25 09:16:35 +02003757static void set_min_partial(struct kmem_cache *s, unsigned long min)
David Rientjes3b89d7d2009-02-22 17:40:07 -08003758{
3759 if (min < MIN_PARTIAL)
3760 min = MIN_PARTIAL;
3761 else if (min > MAX_PARTIAL)
3762 min = MAX_PARTIAL;
3763 s->min_partial = min;
3764}
3765
Wei Yange6d0e1d2017-07-06 15:36:34 -07003766static void set_cpu_partial(struct kmem_cache *s)
3767{
3768#ifdef CONFIG_SLUB_CPU_PARTIAL
3769 /*
3770 * cpu_partial determines the maximum number of objects kept in the
3771 * per cpu partial lists of a processor.
3772 *
3773 * Per cpu partial lists mainly contain slabs that just have one
3774 * object freed. If they are used for allocation then they can be
3775 * filled up again with minimal effort. The slab will never hit the
3776 * per node partial lists and therefore no locking will be required.
3777 *
3778 * This setting also determines
3779 *
3780 * A) The number of objects from per cpu partial slabs dumped to the
3781 * per node list when we reach the limit.
3782 * B) The number of objects in cpu partial slabs to extract from the
3783 * per node list when we run out of per cpu objects. We only fetch
3784 * 50% to keep some capacity around for frees.
3785 */
3786 if (!kmem_cache_has_cpu_partial(s))
chenqiwubbd4e302020-04-01 21:04:19 -07003787 slub_set_cpu_partial(s, 0);
Wei Yange6d0e1d2017-07-06 15:36:34 -07003788 else if (s->size >= PAGE_SIZE)
chenqiwubbd4e302020-04-01 21:04:19 -07003789 slub_set_cpu_partial(s, 2);
Wei Yange6d0e1d2017-07-06 15:36:34 -07003790 else if (s->size >= 1024)
chenqiwubbd4e302020-04-01 21:04:19 -07003791 slub_set_cpu_partial(s, 6);
Wei Yange6d0e1d2017-07-06 15:36:34 -07003792 else if (s->size >= 256)
chenqiwubbd4e302020-04-01 21:04:19 -07003793 slub_set_cpu_partial(s, 13);
Wei Yange6d0e1d2017-07-06 15:36:34 -07003794 else
chenqiwubbd4e302020-04-01 21:04:19 -07003795 slub_set_cpu_partial(s, 30);
Wei Yange6d0e1d2017-07-06 15:36:34 -07003796#endif
3797}
3798
Christoph Lameter81819f02007-05-06 14:49:36 -07003799/*
3800 * calculate_sizes() determines the order and the distribution of data within
3801 * a slab object.
3802 */
Christoph Lameter06b285d2008-04-14 19:11:41 +03003803static int calculate_sizes(struct kmem_cache *s, int forced_order)
Christoph Lameter81819f02007-05-06 14:49:36 -07003804{
Alexey Dobriyand50112e2017-11-15 17:32:18 -08003805 slab_flags_t flags = s->flags;
Alexey Dobriyanbe4a7982018-04-05 16:21:28 -07003806 unsigned int size = s->object_size;
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07003807 unsigned int order;
Christoph Lameter81819f02007-05-06 14:49:36 -07003808
3809 /*
Christoph Lameterd8b42bf2008-02-15 23:45:25 -08003810 * Round up object size to the next word boundary. We can only
3811 * place the free pointer at word boundaries and this determines
3812 * the possible location of the free pointer.
3813 */
3814 size = ALIGN(size, sizeof(void *));
3815
3816#ifdef CONFIG_SLUB_DEBUG
3817 /*
Christoph Lameter81819f02007-05-06 14:49:36 -07003818 * Determine if we can poison the object itself. If the user of
3819 * the slab may touch the object after free or before allocation
3820 * then we should never poison the object itself.
3821 */
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08003822 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
Christoph Lameterc59def92007-05-16 22:10:50 -07003823 !s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003824 s->flags |= __OBJECT_POISON;
3825 else
3826 s->flags &= ~__OBJECT_POISON;
3827
Christoph Lameter81819f02007-05-06 14:49:36 -07003828
3829 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003830 * If we are Redzoning then check if there is some space between the
Christoph Lameter81819f02007-05-06 14:49:36 -07003831 * end of the object and the free pointer. If not then add an
Christoph Lameter672bba32007-05-09 02:32:39 -07003832 * additional word to have some bytes to store Redzone information.
Christoph Lameter81819f02007-05-06 14:49:36 -07003833 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003834 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
Christoph Lameter81819f02007-05-06 14:49:36 -07003835 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07003836#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07003837
3838 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003839 * With that we have determined the number of bytes in actual use
Kees Cooke41a49fa2021-06-15 18:23:26 -07003840 * by the object and redzoning.
Christoph Lameter81819f02007-05-06 14:49:36 -07003841 */
3842 s->inuse = size;
3843
Kees Cook74c1d3e2021-06-15 18:23:22 -07003844 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3845 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
3846 s->ctor) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003847 /*
3848 * Relocate free pointer after the object if it is not
3849 * permitted to overwrite the first word of the object on
3850 * kmem_cache_free.
3851 *
3852 * This is the case if we do RCU, have a constructor or
Kees Cook74c1d3e2021-06-15 18:23:22 -07003853 * destructor, are poisoning the objects, or are
3854 * redzoning an object smaller than sizeof(void *).
Waiman Longcbfc35a2020-05-07 18:36:06 -07003855 *
3856 * The assumption that s->offset >= s->inuse means free
3857 * pointer is outside of the object is used in the
3858 * freeptr_outside_object() function. If that is no
3859 * longer true, the function needs to be modified.
Christoph Lameter81819f02007-05-06 14:49:36 -07003860 */
3861 s->offset = size;
3862 size += sizeof(void *);
Kees Cooke41a49fa2021-06-15 18:23:26 -07003863 } else {
Kees Cook3202fa62020-04-01 21:04:27 -07003864 /*
3865 * Store freelist pointer near middle of object to keep
3866 * it away from the edges of the object to avoid small
3867 * sized over/underflows from neighboring allocations.
3868 */
Kees Cooke41a49fa2021-06-15 18:23:26 -07003869 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
Christoph Lameter81819f02007-05-06 14:49:36 -07003870 }
3871
Christoph Lameterc12b3c62007-05-23 13:57:31 -07003872#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07003873 if (flags & SLAB_STORE_USER)
3874 /*
3875 * Need to store information about allocs and frees after
3876 * the object.
3877 */
3878 size += 2 * sizeof(struct track);
Alexander Potapenko80a92012016-07-28 15:49:07 -07003879#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07003880
Alexander Potapenko80a92012016-07-28 15:49:07 -07003881 kasan_cache_create(s, &size, &s->flags);
3882#ifdef CONFIG_SLUB_DEBUG
Joonsoo Kimd86bd1b2016-03-15 14:55:12 -07003883 if (flags & SLAB_RED_ZONE) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003884 /*
3885 * Add some empty padding so that we can catch
3886 * overwrites from earlier objects rather than let
3887 * tracking information or the free pointer be
Frederik Schwarzer0211a9c2008-12-29 22:14:56 +01003888 * corrupted if a user writes before the start
Christoph Lameter81819f02007-05-06 14:49:36 -07003889 * of the object.
3890 */
3891 size += sizeof(void *);
Joonsoo Kimd86bd1b2016-03-15 14:55:12 -07003892
3893 s->red_left_pad = sizeof(void *);
3894 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3895 size += s->red_left_pad;
3896 }
Christoph Lameter41ecc552007-05-09 02:32:44 -07003897#endif
Christoph Lameter672bba32007-05-09 02:32:39 -07003898
Christoph Lameter81819f02007-05-06 14:49:36 -07003899 /*
Christoph Lameter81819f02007-05-06 14:49:36 -07003900 * SLUB stores one object immediately after another beginning from
3901 * offset 0. In order to align the objects we have to simply size
3902 * each object to conform to the alignment.
3903 */
Christoph Lameter45906852012-11-28 16:23:16 +00003904 size = ALIGN(size, s->align);
Christoph Lameter81819f02007-05-06 14:49:36 -07003905 s->size = size;
Roman Gushchin4138fdf2020-08-06 23:20:42 -07003906 s->reciprocal_size = reciprocal_value(size);
Christoph Lameter06b285d2008-04-14 19:11:41 +03003907 if (forced_order >= 0)
3908 order = forced_order;
3909 else
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003910 order = calculate_order(size);
Christoph Lameter81819f02007-05-06 14:49:36 -07003911
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07003912 if ((int)order < 0)
Christoph Lameter81819f02007-05-06 14:49:36 -07003913 return 0;
3914
Christoph Lameterb7a49f02008-02-14 14:21:32 -08003915 s->allocflags = 0;
Christoph Lameter834f3d12008-04-14 19:11:31 +03003916 if (order)
Christoph Lameterb7a49f02008-02-14 14:21:32 -08003917 s->allocflags |= __GFP_COMP;
3918
3919 if (s->flags & SLAB_CACHE_DMA)
Christoph Lameter2c59dd62013-01-10 19:14:19 +00003920 s->allocflags |= GFP_DMA;
Christoph Lameterb7a49f02008-02-14 14:21:32 -08003921
Nicolas Boichat6d6ea1e2019-03-28 20:43:42 -07003922 if (s->flags & SLAB_CACHE_DMA32)
3923 s->allocflags |= GFP_DMA32;
3924
Christoph Lameterb7a49f02008-02-14 14:21:32 -08003925 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3926 s->allocflags |= __GFP_RECLAIMABLE;
3927
Christoph Lameter81819f02007-05-06 14:49:36 -07003928 /*
3929 * Determine the number of objects per slab
3930 */
Matthew Wilcox9736d2a2018-06-07 17:09:10 -07003931 s->oo = oo_make(order, size);
3932 s->min = oo_make(get_order(size), size);
Christoph Lameter205ab992008-04-14 19:11:40 +03003933 if (oo_objects(s->oo) > oo_objects(s->max))
3934 s->max = s->oo;
Christoph Lameter81819f02007-05-06 14:49:36 -07003935
Christoph Lameter834f3d12008-04-14 19:11:31 +03003936 return !!oo_objects(s->oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07003937}
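
/*
 * A sketch of where calculate_sizes() ends up placing the free pointer,
 * as a simplified standalone model (invented helper, not compiled here):
 * caches with a ctor, RCU/poisoning, or a redzone on a sub-word object get
 * the pointer appended after the object; everything else keeps it near the
 * middle of the object, away from both edges.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define DEMO_WORD		sizeof(void *)
#define DEMO_ALIGN_DOWN(x, a)	((x) / (a) * (a))

static unsigned long demo_freeptr_offset(unsigned long object_size,
					 bool ctor_rcu_or_poison,
					 bool redzone)
{
	unsigned long size = (object_size + DEMO_WORD - 1) / DEMO_WORD * DEMO_WORD;

	if (ctor_rcu_or_poison || (redzone && object_size < DEMO_WORD))
		return size;	/* stored after the object (offset == inuse) */

	return DEMO_ALIGN_DOWN(object_size / 2, DEMO_WORD);
}

int main(void)
{
	/* A plain 52-byte cache: pointer lands at offset 24 on 64-bit. */
	printf("%lu\n", demo_freeptr_offset(52, false, false));
	/* The same size with a ctor: pointer lands after the object, at 56. */
	printf("%lu\n", demo_freeptr_offset(52, true, false));
	return 0;
}
#endif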
3938
Alexey Dobriyand50112e2017-11-15 17:32:18 -08003939static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
Christoph Lameter81819f02007-05-06 14:49:36 -07003940{
Nikolay Borisov37540002021-02-24 12:00:58 -08003941 s->flags = kmem_cache_flags(s->size, flags, s->name);
Kees Cook2482ddec2017-09-06 16:19:18 -07003942#ifdef CONFIG_SLAB_FREELIST_HARDENED
3943 s->random = get_random_long();
3944#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07003945
Christoph Lameter06b285d2008-04-14 19:11:41 +03003946 if (!calculate_sizes(s, -1))
Christoph Lameter81819f02007-05-06 14:49:36 -07003947 goto error;
David Rientjes3de47212009-07-27 18:30:35 -07003948 if (disable_higher_order_debug) {
3949 /*
3950 * Disable debugging flags that store metadata if the min slab
3951 * order increased.
3952 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003953 if (get_order(s->size) > get_order(s->object_size)) {
David Rientjes3de47212009-07-27 18:30:35 -07003954 s->flags &= ~DEBUG_METADATA_FLAGS;
3955 s->offset = 0;
3956 if (!calculate_sizes(s, -1))
3957 goto error;
3958 }
3959 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003960
Heiko Carstens25654092012-01-12 17:17:33 -08003961#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3962 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
Laura Abbott149daaf2016-03-15 14:55:09 -07003963 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
Christoph Lameterb789ef52011-06-01 12:25:49 -05003964 /* Enable fast mode */
3965 s->flags |= __CMPXCHG_DOUBLE;
3966#endif
3967
David Rientjes3b89d7d2009-02-22 17:40:07 -08003968 /*
3969 * The larger the object size is, the more pages we want on the partial
3970 * list to avoid pounding the page allocator excessively.
3971 */
Christoph Lameter49e22582011-08-09 16:12:27 -05003972 set_min_partial(s, ilog2(s->size) / 2);
3973
Wei Yange6d0e1d2017-07-06 15:36:34 -07003974 set_cpu_partial(s);
Christoph Lameter49e22582011-08-09 16:12:27 -05003975
Christoph Lameter81819f02007-05-06 14:49:36 -07003976#ifdef CONFIG_NUMA
Christoph Lametere2cb96b2008-08-19 08:51:22 -05003977 s->remote_node_defrag_ratio = 1000;
Christoph Lameter81819f02007-05-06 14:49:36 -07003978#endif
Thomas Garnier210e7a42016-07-26 15:21:59 -07003979
3980 /* Initialize the pre-computed randomized freelist if slab is up */
3981 if (slab_state >= UP) {
3982 if (init_cache_random_seq(s))
3983 goto error;
3984 }
3985
Christoph Lameter55136592010-08-20 12:37:13 -05003986 if (!init_kmem_cache_nodes(s))
Christoph Lameterdfb4f092007-10-16 01:26:05 -07003987 goto error;
Christoph Lameter81819f02007-05-06 14:49:36 -07003988
Christoph Lameter55136592010-08-20 12:37:13 -05003989 if (alloc_kmem_cache_cpus(s))
Christoph Lameter278b1bb2012-09-05 00:20:34 +00003990 return 0;
Christoph Lameterff120592009-12-18 16:26:22 -06003991
Christoph Lameter4c93c3552007-10-16 01:26:08 -07003992 free_kmem_cache_nodes(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07003993error:
Christoph Lameter278b1bb2012-09-05 00:20:34 +00003994 return -EINVAL;
Christoph Lameter81819f02007-05-06 14:49:36 -07003995}
Christoph Lameter81819f02007-05-06 14:49:36 -07003996
Christoph Lameter33b12c32008-04-25 12:22:43 -07003997static void list_slab_objects(struct kmem_cache *s, struct page *page,
Sebastian Andrzej Siewior55860d92020-06-25 20:29:55 -07003998 const char *text)
Christoph Lameter81819f02007-05-06 14:49:36 -07003999{
Christoph Lameter33b12c32008-04-25 12:22:43 -07004000#ifdef CONFIG_SLUB_DEBUG
4001 void *addr = page_address(page);
Sebastian Andrzej Siewior55860d92020-06-25 20:29:55 -07004002 unsigned long *map;
Christoph Lameter33b12c32008-04-25 12:22:43 -07004003 void *p;
Christopher Lameteraa456c72020-06-01 21:45:53 -07004004
Christoph Lameter945cf2b2012-09-04 23:18:33 +00004005 slab_err(s, page, text, s->name);
Christoph Lameter33b12c32008-04-25 12:22:43 -07004006 slab_lock(page);
Christoph Lameter33b12c32008-04-25 12:22:43 -07004007
Yu Zhao90e9f6a2020-01-30 22:11:57 -08004008 map = get_map(s, page);
Christoph Lameter33b12c32008-04-25 12:22:43 -07004009 for_each_object(p, s, addr, page->objects) {
4010
Roman Gushchin4138fdf2020-08-06 23:20:42 -07004011 if (!test_bit(__obj_to_index(s, addr, p), map)) {
Yafang Shao96b94ab2021-03-19 18:12:45 +08004012 pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
Christoph Lameter33b12c32008-04-25 12:22:43 -07004013 print_tracking(s, p);
4014 }
4015 }
Sebastian Andrzej Siewior55860d92020-06-25 20:29:55 -07004016 put_map(map);
Christoph Lameter33b12c32008-04-25 12:22:43 -07004017 slab_unlock(page);
4018#endif
4019}
4020
Christoph Lameter81819f02007-05-06 14:49:36 -07004021/*
Christoph Lameter599870b2008-04-23 12:36:52 -07004022 * Attempt to free all partial slabs on a node.
Dmitry Safonov52b4b952016-02-17 13:11:37 -08004023 * This is called from __kmem_cache_shutdown(). We must take list_lock
4024 * because sysfs files might still access the partial list after the shutdown has begun.
Christoph Lameter81819f02007-05-06 14:49:36 -07004025 */
Christoph Lameter599870b2008-04-23 12:36:52 -07004026static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07004027{
Chris Wilson60398922016-08-10 16:27:58 -07004028 LIST_HEAD(discard);
Christoph Lameter81819f02007-05-06 14:49:36 -07004029 struct page *page, *h;
4030
Dmitry Safonov52b4b952016-02-17 13:11:37 -08004031 BUG_ON(irqs_disabled());
4032 spin_lock_irq(&n->list_lock);
Tobin C. Harding916ac052019-05-13 17:16:12 -07004033 list_for_each_entry_safe(page, h, &n->partial, slab_list) {
Christoph Lameter81819f02007-05-06 14:49:36 -07004034 if (!page->inuse) {
Dmitry Safonov52b4b952016-02-17 13:11:37 -08004035 remove_partial(n, page);
Tobin C. Harding916ac052019-05-13 17:16:12 -07004036 list_add(&page->slab_list, &discard);
Christoph Lameter33b12c32008-04-25 12:22:43 -07004037 } else {
4038 list_slab_objects(s, page,
Sebastian Andrzej Siewior55860d92020-06-25 20:29:55 -07004039 "Objects remaining in %s on __kmem_cache_shutdown()");
Christoph Lameter599870b2008-04-23 12:36:52 -07004040 }
Christoph Lameter33b12c32008-04-25 12:22:43 -07004041 }
Dmitry Safonov52b4b952016-02-17 13:11:37 -08004042 spin_unlock_irq(&n->list_lock);
Chris Wilson60398922016-08-10 16:27:58 -07004043
Tobin C. Harding916ac052019-05-13 17:16:12 -07004044 list_for_each_entry_safe(page, h, &discard, slab_list)
Chris Wilson60398922016-08-10 16:27:58 -07004045 discard_slab(s, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07004046}
4047
Shakeel Buttf9e13c02018-04-05 16:21:57 -07004048bool __kmem_cache_empty(struct kmem_cache *s)
4049{
4050 int node;
4051 struct kmem_cache_node *n;
4052
4053 for_each_kmem_cache_node(s, node, n)
4054 if (n->nr_partial || slabs_node(s, node))
4055 return false;
4056 return true;
4057}
4058
Christoph Lameter81819f02007-05-06 14:49:36 -07004059/*
Christoph Lameter672bba32007-05-09 02:32:39 -07004060 * Release all resources used by a slab cache.
Christoph Lameter81819f02007-05-06 14:49:36 -07004061 */
Dmitry Safonov52b4b952016-02-17 13:11:37 -08004062int __kmem_cache_shutdown(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07004063{
4064 int node;
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004065 struct kmem_cache_node *n;
Christoph Lameter81819f02007-05-06 14:49:36 -07004066
4067 flush_all(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07004068 /* Attempt to free all objects */
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004069 for_each_kmem_cache_node(s, node, n) {
Christoph Lameter599870b2008-04-23 12:36:52 -07004070 free_partial(s, n);
4071 if (n->nr_partial || slabs_node(s, node))
Christoph Lameter81819f02007-05-06 14:49:36 -07004072 return 1;
4073 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004074 return 0;
4075}
4076
Paul E. McKenney5bb1bb32021-01-07 13:46:11 -08004077#ifdef CONFIG_PRINTK
Paul E. McKenney8e7f37f2020-12-07 17:41:02 -08004078void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
4079{
4080 void *base;
4081 int __maybe_unused i;
4082 unsigned int objnr;
4083 void *objp;
4084 void *objp0;
4085 struct kmem_cache *s = page->slab_cache;
4086 struct track __maybe_unused *trackp;
4087
4088 kpp->kp_ptr = object;
4089 kpp->kp_page = page;
4090 kpp->kp_slab_cache = s;
4091 base = page_address(page);
4092 objp0 = kasan_reset_tag(object);
4093#ifdef CONFIG_SLUB_DEBUG
4094 objp = restore_red_left(s, objp0);
4095#else
4096 objp = objp0;
4097#endif
4098 objnr = obj_to_index(s, page, objp);
4099 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
4100 objp = base + s->size * objnr;
4101 kpp->kp_objp = objp;
4102 if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) ||
4103 !(s->flags & SLAB_STORE_USER))
4104 return;
4105#ifdef CONFIG_SLUB_DEBUG
Maninder Singh0cbc1242021-03-16 16:07:10 +05304106 objp = fixup_red_left(s, objp);
Paul E. McKenney8e7f37f2020-12-07 17:41:02 -08004107 trackp = get_track(s, objp, TRACK_ALLOC);
4108 kpp->kp_ret = (void *)trackp->addr;
Linus Torvaldsae14c632021-07-17 13:27:00 -07004109#ifdef CONFIG_STACKTRACE
4110 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
4111 kpp->kp_stack[i] = (void *)trackp->addrs[i];
4112 if (!kpp->kp_stack[i])
4113 break;
4114 }
Maninder Singhe548eaa2021-03-16 16:07:11 +05304115
Linus Torvaldsae14c632021-07-17 13:27:00 -07004116 trackp = get_track(s, objp, TRACK_FREE);
4117 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
4118 kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
4119 if (!kpp->kp_free_stack[i])
4120 break;
Maninder Singhe548eaa2021-03-16 16:07:11 +05304121 }
Paul E. McKenney8e7f37f2020-12-07 17:41:02 -08004122#endif
4123#endif
4124}
Paul E. McKenney5bb1bb32021-01-07 13:46:11 -08004125#endif
Paul E. McKenney8e7f37f2020-12-07 17:41:02 -08004126
Christoph Lameter81819f02007-05-06 14:49:36 -07004127/********************************************************************
4128 * Kmalloc subsystem
4129 *******************************************************************/
4130
Christoph Lameter81819f02007-05-06 14:49:36 -07004131static int __init setup_slub_min_order(char *str)
4132{
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07004133 get_option(&str, (int *)&slub_min_order);
Christoph Lameter81819f02007-05-06 14:49:36 -07004134
4135 return 1;
4136}
4137
4138__setup("slub_min_order=", setup_slub_min_order);
4139
4140static int __init setup_slub_max_order(char *str)
4141{
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07004142 get_option(&str, (int *)&slub_max_order);
4143 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004144
4145 return 1;
4146}
4147
4148__setup("slub_max_order=", setup_slub_max_order);
4149
4150static int __init setup_slub_min_objects(char *str)
4151{
Alexey Dobriyan19af27a2018-04-05 16:21:39 -07004152 get_option(&str, (int *)&slub_min_objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07004153
4154 return 1;
4155}
4156
4157__setup("slub_min_objects=", setup_slub_min_objects);
4158
Christoph Lameter81819f02007-05-06 14:49:36 -07004159void *__kmalloc(size_t size, gfp_t flags)
4160{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004161 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004162 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004163
Christoph Lameter95a05b42013-01-10 19:14:19 +00004164 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02004165 return kmalloc_large(size, flags);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004166
Christoph Lameter2c59dd62013-01-10 19:14:19 +00004167 s = kmalloc_slab(size, flags);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004168
4169 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07004170 return s;
4171
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08004172 ret = slab_alloc(s, flags, _RET_IP_, size);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004173
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004174 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004175
Andrey Konovalov01165232018-12-28 00:29:37 -08004176 ret = kasan_kmalloc(s, ret, size, flags);
Andrey Ryabinin0316bec2015-02-13 14:39:42 -08004177
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004178 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004179}
4180EXPORT_SYMBOL(__kmalloc);
4181
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09004182#ifdef CONFIG_NUMA
Christoph Lameterf619cfe2008-03-01 13:56:40 -08004183static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
4184{
Vegard Nossumb1eeab62008-11-25 16:55:53 +01004185 struct page *page;
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01004186 void *ptr = NULL;
Vlastimil Babka6a486c02019-10-06 17:58:42 -07004187 unsigned int order = get_order(size);
Christoph Lameterf619cfe2008-03-01 13:56:40 -08004188
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -08004189 flags |= __GFP_COMP;
Vlastimil Babka6a486c02019-10-06 17:58:42 -07004190 page = alloc_pages_node(node, flags, order);
4191 if (page) {
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01004192 ptr = page_address(page);
Muchun Song96403bf2021-02-24 12:04:26 -08004193 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4194 PAGE_SIZE << order);
Vlastimil Babka6a486c02019-10-06 17:58:42 -07004195 }
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01004196
Andrey Konovalov01165232018-12-28 00:29:37 -08004197 return kmalloc_large_node_hook(ptr, size, flags);
Christoph Lameterf619cfe2008-03-01 13:56:40 -08004198}
4199
Christoph Lameter81819f02007-05-06 14:49:36 -07004200void *__kmalloc_node(size_t size, gfp_t flags, int node)
4201{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004202 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004203 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004204
Christoph Lameter95a05b42013-01-10 19:14:19 +00004205 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004206 ret = kmalloc_large_node(size, flags, node);
4207
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004208 trace_kmalloc_node(_RET_IP_, ret,
4209 size, PAGE_SIZE << get_order(size),
4210 flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004211
4212 return ret;
4213 }
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004214
Christoph Lameter2c59dd62013-01-10 19:14:19 +00004215 s = kmalloc_slab(size, flags);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004216
4217 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07004218 return s;
4219
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08004220 ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004221
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004222 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004223
Andrey Konovalov01165232018-12-28 00:29:37 -08004224 ret = kasan_kmalloc(s, ret, size, flags);
Andrey Ryabinin0316bec2015-02-13 14:39:42 -08004225
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03004226 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004227}
4228EXPORT_SYMBOL(__kmalloc_node);
Tobin C. Harding6dfd1b62019-05-13 17:16:09 -07004229#endif /* CONFIG_NUMA */
Christoph Lameter81819f02007-05-06 14:49:36 -07004230
Kees Cooked18adc2016-06-23 15:24:05 -07004231#ifdef CONFIG_HARDENED_USERCOPY
4232/*
Kees Cookafcc90f82018-01-10 15:17:01 -08004233 * Rejects incorrectly sized objects and objects that are to be copied
4234 * to/from userspace but do not fall entirely within the containing slab
4235 * cache's usercopy region.
Kees Cooked18adc2016-06-23 15:24:05 -07004236 *
4237 * Returns NULL if check passes, otherwise const char * to name of cache
4238 * to indicate an error.
4239 */
Kees Cookf4e6e282018-01-10 14:48:22 -08004240void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4241 bool to_user)
Kees Cooked18adc2016-06-23 15:24:05 -07004242{
4243 struct kmem_cache *s;
Alexey Dobriyan44065b22018-04-05 16:21:20 -07004244 unsigned int offset;
Kees Cooked18adc2016-06-23 15:24:05 -07004245 size_t object_size;
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08004246 bool is_kfence = is_kfence_address(ptr);
Kees Cooked18adc2016-06-23 15:24:05 -07004247
Andrey Konovalov96fedce2019-01-08 15:23:15 -08004248 ptr = kasan_reset_tag(ptr);
4249
Kees Cooked18adc2016-06-23 15:24:05 -07004250 /* Find object and usable object size. */
4251 s = page->slab_cache;
Kees Cooked18adc2016-06-23 15:24:05 -07004252
4253 /* Reject impossible pointers. */
4254 if (ptr < page_address(page))
Kees Cookf4e6e282018-01-10 14:48:22 -08004255 usercopy_abort("SLUB object not in SLUB page?!", NULL,
4256 to_user, 0, n);
Kees Cooked18adc2016-06-23 15:24:05 -07004257
4258 /* Find offset within object. */
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08004259 if (is_kfence)
4260 offset = ptr - kfence_object_start(ptr);
4261 else
4262 offset = (ptr - page_address(page)) % s->size;
Kees Cooked18adc2016-06-23 15:24:05 -07004263
4264 /* Adjust for redzone and reject if within the redzone. */
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08004265 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
Kees Cooked18adc2016-06-23 15:24:05 -07004266 if (offset < s->red_left_pad)
Kees Cookf4e6e282018-01-10 14:48:22 -08004267 usercopy_abort("SLUB object in left red zone",
4268 s->name, to_user, offset, n);
Kees Cooked18adc2016-06-23 15:24:05 -07004269 offset -= s->red_left_pad;
4270 }
4271
Kees Cookafcc90f82018-01-10 15:17:01 -08004272 /* Allow address range falling entirely within usercopy region. */
4273 if (offset >= s->useroffset &&
4274 offset - s->useroffset <= s->usersize &&
4275 n <= s->useroffset - offset + s->usersize)
Kees Cookf4e6e282018-01-10 14:48:22 -08004276 return;
Kees Cooked18adc2016-06-23 15:24:05 -07004277
Kees Cookafcc90f82018-01-10 15:17:01 -08004278 /*
4279 * If the copy is still within the allocated object, produce
4280 * a warning instead of rejecting the copy. This is intended
4281 * to be a temporary method to find any missing usercopy
4282 * whitelists.
4283 */
4284 object_size = slab_ksize(s);
Kees Cook2d891fb2017-11-30 13:04:32 -08004285 if (usercopy_fallback &&
4286 offset <= object_size && n <= object_size - offset) {
Kees Cookafcc90f82018-01-10 15:17:01 -08004287 usercopy_warn("SLUB object", s->name, to_user, offset, n);
4288 return;
4289 }
4290
Kees Cookf4e6e282018-01-10 14:48:22 -08004291 usercopy_abort("SLUB object", s->name, to_user, offset, n);
Kees Cooked18adc2016-06-23 15:24:05 -07004292}
4293#endif /* CONFIG_HARDENED_USERCOPY */
4294
Marco Elver10d1f8c2019-07-11 20:54:14 -07004295size_t __ksize(const void *object)
Christoph Lameter81819f02007-05-06 14:49:36 -07004296{
Christoph Lameter272c1d22007-06-08 13:46:49 -07004297 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07004298
Christoph Lameteref8b4522007-10-16 01:24:46 -07004299 if (unlikely(object == ZERO_SIZE_PTR))
Christoph Lameter272c1d22007-06-08 13:46:49 -07004300 return 0;
4301
Vegard Nossum294a80a2007-12-04 23:45:30 -08004302 page = virt_to_head_page(object);
Vegard Nossum294a80a2007-12-04 23:45:30 -08004303
Pekka Enberg76994412008-05-22 19:22:25 +03004304 if (unlikely(!PageSlab(page))) {
4305 WARN_ON(!PageCompound(page));
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07004306 return page_size(page);
Pekka Enberg76994412008-05-22 19:22:25 +03004307 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004308
Glauber Costa1b4f59e32012-10-22 18:05:36 +04004309 return slab_ksize(page->slab_cache);
Christoph Lameter81819f02007-05-06 14:49:36 -07004310}
Marco Elver10d1f8c2019-07-11 20:54:14 -07004311EXPORT_SYMBOL(__ksize);
Christoph Lameter81819f02007-05-06 14:49:36 -07004312
4313void kfree(const void *x)
4314{
Christoph Lameter81819f02007-05-06 14:49:36 -07004315 struct page *page;
Christoph Lameter5bb983b2008-02-07 17:47:41 -08004316 void *object = (void *)x;
Christoph Lameter81819f02007-05-06 14:49:36 -07004317
Pekka Enberg2121db72009-03-25 11:05:57 +02004318 trace_kfree(_RET_IP_, x);
4319
Satyam Sharma2408c552007-10-16 01:24:44 -07004320 if (unlikely(ZERO_OR_NULL_PTR(x)))
Christoph Lameter81819f02007-05-06 14:49:36 -07004321 return;
4322
Christoph Lameterb49af682007-05-06 14:49:41 -07004323 page = virt_to_head_page(x);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004324 if (unlikely(!PageSlab(page))) {
Shakeel Butt1ed7ce52021-08-13 16:54:31 -07004325 free_nonslab_page(page, object);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004326 return;
4327 }
Jesper Dangaard Brouer81084652015-11-20 15:57:46 -08004328 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
Christoph Lameter81819f02007-05-06 14:49:36 -07004329}
4330EXPORT_SYMBOL(kfree);
4331
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004332#define SHRINK_PROMOTE_MAX 32
4333
Christoph Lameter2086d262007-05-06 14:49:46 -07004334/*
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004335 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
4336 * up the most to the head of the partial lists. New allocations will then
4337 * fill those up and thus they can be removed from the partial lists.
Christoph Lameter672bba32007-05-09 02:32:39 -07004338 *
4339 * The slabs with the least items are placed last. This results in them
4340 * being allocated from last, increasing the chance that the last objects
4341 * are freed in them.
Christoph Lameter2086d262007-05-06 14:49:46 -07004342 */
Tejun Heoc9fc5862017-02-22 15:41:27 -08004343int __kmem_cache_shrink(struct kmem_cache *s)
Christoph Lameter2086d262007-05-06 14:49:46 -07004344{
4345 int node;
4346 int i;
4347 struct kmem_cache_node *n;
4348 struct page *page;
4349 struct page *t;
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004350 struct list_head discard;
4351 struct list_head promote[SHRINK_PROMOTE_MAX];
Christoph Lameter2086d262007-05-06 14:49:46 -07004352 unsigned long flags;
Vladimir Davydovce3712d2015-02-12 14:59:44 -08004353 int ret = 0;
Christoph Lameter2086d262007-05-06 14:49:46 -07004354
Christoph Lameter2086d262007-05-06 14:49:46 -07004355 flush_all(s);
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004356 for_each_kmem_cache_node(s, node, n) {
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004357 INIT_LIST_HEAD(&discard);
4358 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
4359 INIT_LIST_HEAD(promote + i);
Christoph Lameter2086d262007-05-06 14:49:46 -07004360
4361 spin_lock_irqsave(&n->list_lock, flags);
4362
4363 /*
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004364 * Build lists of slabs to discard or promote.
Christoph Lameter2086d262007-05-06 14:49:46 -07004365 *
Christoph Lameter672bba32007-05-09 02:32:39 -07004366 * Note that concurrent frees may occur while we hold the
4367 * list_lock. page->inuse here is the upper limit.
Christoph Lameter2086d262007-05-06 14:49:46 -07004368 */
Tobin C. Harding916ac052019-05-13 17:16:12 -07004369 list_for_each_entry_safe(page, t, &n->partial, slab_list) {
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004370 int free = page->objects - page->inuse;
4371
4372 /* Do not reread page->inuse */
4373 barrier();
4374
4375 /* We do not keep full slabs on the list */
4376 BUG_ON(free <= 0);
4377
4378 if (free == page->objects) {
Tobin C. Harding916ac052019-05-13 17:16:12 -07004379 list_move(&page->slab_list, &discard);
Christoph Lameter69cb8e62011-08-09 16:12:22 -05004380 n->nr_partial--;
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004381 } else if (free <= SHRINK_PROMOTE_MAX)
Tobin C. Harding916ac052019-05-13 17:16:12 -07004382 list_move(&page->slab_list, promote + free - 1);
Christoph Lameter2086d262007-05-06 14:49:46 -07004383 }
4384
Christoph Lameter2086d262007-05-06 14:49:46 -07004385 /*
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004386 * Promote the slabs filled up most to the head of the
4387 * partial list.
Christoph Lameter2086d262007-05-06 14:49:46 -07004388 */
Vladimir Davydov832f37f2015-02-12 14:59:41 -08004389 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4390 list_splice(promote + i, &n->partial);
Christoph Lameter2086d262007-05-06 14:49:46 -07004391
Christoph Lameter2086d262007-05-06 14:49:46 -07004392 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter69cb8e62011-08-09 16:12:22 -05004393
4394 /* Release empty slabs */
Tobin C. Harding916ac052019-05-13 17:16:12 -07004395 list_for_each_entry_safe(page, t, &discard, slab_list)
Christoph Lameter69cb8e62011-08-09 16:12:22 -05004396 discard_slab(s, page);
Vladimir Davydovce3712d2015-02-12 14:59:44 -08004397
4398 if (slabs_node(s, node))
4399 ret = 1;
Christoph Lameter2086d262007-05-06 14:49:46 -07004400 }
4401
Vladimir Davydovce3712d2015-02-12 14:59:44 -08004402 return ret;
Christoph Lameter2086d262007-05-06 14:49:46 -07004403}
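/*
 * Note on the promote buckets above: a partial slab with one free object
 * is queued on promote[0], one with SHRINK_PROMOTE_MAX free objects on
 * promote[SHRINK_PROMOTE_MAX - 1].  Splicing from the highest index down
 * therefore leaves the fullest slabs at the head of n->partial, while
 * completely free slabs were already moved to the discard list and are
 * released only after the list_lock has been dropped.  __kmem_cache_shrink()
 * is invoked both from the memory hotplug notifier below and, via
 * kmem_cache_shrink(), from the sysfs 'shrink' attribute.
 */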
Christoph Lameter2086d262007-05-06 14:49:46 -07004404
Yasunori Gotob9049e22007-10-21 16:41:37 -07004405static int slab_mem_going_offline_callback(void *arg)
4406{
4407 struct kmem_cache *s;
4408
Christoph Lameter18004c52012-07-06 15:25:12 -05004409 mutex_lock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004410 list_for_each_entry(s, &slab_caches, list)
Tejun Heoc9fc5862017-02-22 15:41:27 -08004411 __kmem_cache_shrink(s);
Christoph Lameter18004c52012-07-06 15:25:12 -05004412 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004413
4414 return 0;
4415}
4416
4417static void slab_mem_offline_callback(void *arg)
4418{
Yasunori Gotob9049e22007-10-21 16:41:37 -07004419 struct memory_notify *marg = arg;
4420 int offline_node;
4421
Lai Jiangshanb9d5ab22012-12-11 16:01:05 -08004422 offline_node = marg->status_change_nid_normal;
Yasunori Gotob9049e22007-10-21 16:41:37 -07004423
4424 /*
4425 * If the node still has available memory, we still need the
4426 * kmem_cache_node for it, so there is nothing to do here.
4427 */
4428 if (offline_node < 0)
4429 return;
4430
Christoph Lameter18004c52012-07-06 15:25:12 -05004431 mutex_lock(&slab_mutex);
Vlastimil Babka7e1fa932021-02-24 12:01:12 -08004432 node_clear(offline_node, slab_nodes);
Vlastimil Babka666716f2021-02-24 12:01:08 -08004433 /*
4434 * We no longer free kmem_cache_node structures here, as it would be
4435 * racy with all get_node() users, and infeasible to protect them with
4436 * slab_mutex.
4437 */
Christoph Lameter18004c52012-07-06 15:25:12 -05004438 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004439}
4440
4441static int slab_mem_going_online_callback(void *arg)
4442{
4443 struct kmem_cache_node *n;
4444 struct kmem_cache *s;
4445 struct memory_notify *marg = arg;
Lai Jiangshanb9d5ab22012-12-11 16:01:05 -08004446 int nid = marg->status_change_nid_normal;
Yasunori Gotob9049e22007-10-21 16:41:37 -07004447 int ret = 0;
4448
4449 /*
4450 * If the node's memory is already available, then kmem_cache_node is
4451 * already created. Nothing to do.
4452 */
4453 if (nid < 0)
4454 return 0;
4455
4456 /*
Christoph Lameter0121c6192008-04-29 16:11:12 -07004457 * We are bringing a node online. No memory is available yet. We must
Yasunori Gotob9049e22007-10-21 16:41:37 -07004458 * allocate a kmem_cache_node structure in order to bring the node
4459 * online.
4460 */
Christoph Lameter18004c52012-07-06 15:25:12 -05004461 mutex_lock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004462 list_for_each_entry(s, &slab_caches, list) {
4463 /*
Vlastimil Babka666716f2021-02-24 12:01:08 -08004464 * The structure may already exist if the node was previously
4465 * onlined and offlined.
4466 */
4467 if (get_node(s, nid))
4468 continue;
4469 /*
Yasunori Gotob9049e22007-10-21 16:41:37 -07004470 * XXX: kmem_cache_alloc_node will fall back to other nodes
4471 * since memory is not yet available from the node that
4472 * is brought up.
4473 */
Christoph Lameter8de66a02010-08-25 14:51:14 -05004474 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004475 if (!n) {
4476 ret = -ENOMEM;
4477 goto out;
4478 }
Joonsoo Kim40534972012-05-11 00:50:47 +09004479 init_kmem_cache_node(n);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004480 s->node[nid] = n;
4481 }
Vlastimil Babka7e1fa932021-02-24 12:01:12 -08004482 /*
4483 * Any cache created after this point will also have kmem_cache_node
4484 * initialized for the new node.
4485 */
4486 node_set(nid, slab_nodes);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004487out:
Christoph Lameter18004c52012-07-06 15:25:12 -05004488 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004489 return ret;
4490}
4491
4492static int slab_memory_callback(struct notifier_block *self,
4493 unsigned long action, void *arg)
4494{
4495 int ret = 0;
4496
4497 switch (action) {
4498 case MEM_GOING_ONLINE:
4499 ret = slab_mem_going_online_callback(arg);
4500 break;
4501 case MEM_GOING_OFFLINE:
4502 ret = slab_mem_going_offline_callback(arg);
4503 break;
4504 case MEM_OFFLINE:
4505 case MEM_CANCEL_ONLINE:
4506 slab_mem_offline_callback(arg);
4507 break;
4508 case MEM_ONLINE:
4509 case MEM_CANCEL_OFFLINE:
4510 break;
4511 }
KAMEZAWA Hiroyukidc19f9d2008-12-01 13:13:48 -08004512 if (ret)
4513 ret = notifier_from_errno(ret);
4514 else
4515 ret = NOTIFY_OK;
Yasunori Gotob9049e22007-10-21 16:41:37 -07004516 return ret;
4517}
4518
Andrew Morton3ac38fa2013-04-29 15:08:06 -07004519static struct notifier_block slab_memory_callback_nb = {
4520 .notifier_call = slab_memory_callback,
4521 .priority = SLAB_CALLBACK_PRI,
4522};
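/*
 * Summary of the hotplug callbacks wired up above: MEM_GOING_ONLINE
 * allocates a kmem_cache_node for the new node in every existing cache
 * and sets the node in slab_nodes; MEM_GOING_OFFLINE shrinks all caches
 * to give back empty slabs; MEM_OFFLINE and MEM_CANCEL_ONLINE only clear
 * the node from slab_nodes, the kmem_cache_node structures themselves
 * are intentionally kept (see slab_mem_offline_callback()).  The notifier
 * is registered from kmem_cache_init() below.
 */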
Yasunori Gotob9049e22007-10-21 16:41:37 -07004523
Christoph Lameter81819f02007-05-06 14:49:36 -07004524/********************************************************************
4525 * Basic setup of slabs
4526 *******************************************************************/
4527
Christoph Lameter51df1142010-08-20 12:37:15 -05004528/*
4529 * Used for early kmem_cache structures that were allocated using
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004530 * the page allocator. Allocate them properly, then fix up the pointers
4531 * that may be pointing to the wrong kmem_cache structure.
Christoph Lameter51df1142010-08-20 12:37:15 -05004532 */
4533
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004534static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
Christoph Lameter51df1142010-08-20 12:37:15 -05004535{
4536 int node;
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004537 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004538 struct kmem_cache_node *n;
Christoph Lameter51df1142010-08-20 12:37:15 -05004539
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004540 memcpy(s, static_cache, kmem_cache->object_size);
Christoph Lameter51df1142010-08-20 12:37:15 -05004541
Glauber Costa7d557b32013-02-22 20:20:00 +04004542 /*
4543 * This runs very early, and only the boot processor is supposed to be
4544 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4545 * IPIs around.
4546 */
4547 __flush_cpu_slab(s, smp_processor_id());
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004548 for_each_kmem_cache_node(s, node, n) {
Christoph Lameter51df1142010-08-20 12:37:15 -05004549 struct page *p;
4550
Tobin C. Harding916ac052019-05-13 17:16:12 -07004551 list_for_each_entry(p, &n->partial, slab_list)
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004552 p->slab_cache = s;
Christoph Lameter51df1142010-08-20 12:37:15 -05004553
Li Zefan607bf322011-04-12 15:22:26 +08004554#ifdef CONFIG_SLUB_DEBUG
Tobin C. Harding916ac052019-05-13 17:16:12 -07004555 list_for_each_entry(p, &n->full, slab_list)
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004556 p->slab_cache = s;
Christoph Lameter51df1142010-08-20 12:37:15 -05004557#endif
Christoph Lameter51df1142010-08-20 12:37:15 -05004558 }
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004559 list_add(&s->list, &slab_caches);
4560 return s;
Christoph Lameter51df1142010-08-20 12:37:15 -05004561}
4562
Christoph Lameter81819f02007-05-06 14:49:36 -07004563void __init kmem_cache_init(void)
4564{
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004565 static __initdata struct kmem_cache boot_kmem_cache,
4566 boot_kmem_cache_node;
Vlastimil Babka7e1fa932021-02-24 12:01:12 -08004567 int node;
Christoph Lameter51df1142010-08-20 12:37:15 -05004568
Stanislaw Gruszkafc8d8622012-01-10 15:07:32 -08004569 if (debug_guardpage_minorder())
4570 slub_max_order = 0;
4571
Stephen Boyd79270292021-06-28 19:34:52 -07004572 /* Print slub debugging pointers without hashing */
4573 if (__slub_debug_enabled())
4574 no_hash_pointers_enable(NULL);
4575
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004576 kmem_cache_node = &boot_kmem_cache_node;
4577 kmem_cache = &boot_kmem_cache;
Christoph Lameter51df1142010-08-20 12:37:15 -05004578
Vlastimil Babka7e1fa932021-02-24 12:01:12 -08004579 /*
4580 * Initialize the nodemask for which we will allocate per node
4581 * structures. Here we don't need to take slab_mutex yet.
4582 */
4583 for_each_node_state(node, N_NORMAL_MEMORY)
4584 node_set(node, slab_nodes);
4585
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004586 create_boot_cache(kmem_cache_node, "kmem_cache_node",
David Windsor8eb82842017-06-10 22:50:28 -04004587 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
Yasunori Gotob9049e22007-10-21 16:41:37 -07004588
Andrew Morton3ac38fa2013-04-29 15:08:06 -07004589 register_hotmemory_notifier(&slab_memory_callback_nb);
Christoph Lameter81819f02007-05-06 14:49:36 -07004590
4591 /* Able to allocate the per node structures */
4592 slab_state = PARTIAL;
4593
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004594 create_boot_cache(kmem_cache, "kmem_cache",
4595 offsetof(struct kmem_cache, node) +
4596 nr_node_ids * sizeof(struct kmem_cache_node *),
David Windsor8eb82842017-06-10 22:50:28 -04004597 SLAB_HWCACHE_ALIGN, 0, 0);
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00004598
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004599 kmem_cache = bootstrap(&boot_kmem_cache);
Christoph Lameterdffb4d62012-11-28 16:23:07 +00004600 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
Christoph Lameter51df1142010-08-20 12:37:15 -05004601
4602 /* Now we can use the kmem_cache to allocate kmalloc slabs */
Daniel Sanders34cc6992015-06-24 16:55:57 -07004603 setup_kmalloc_cache_index_table();
Christoph Lameterf97d5f62013-01-10 19:12:17 +00004604 create_kmalloc_caches(0);
Christoph Lameter81819f02007-05-06 14:49:36 -07004605
Thomas Garnier210e7a42016-07-26 15:21:59 -07004606 /* Setup random freelists for each cache */
4607 init_freelist_randomization();
4608
Sebastian Andrzej Siewiora96a87b2016-08-18 14:57:19 +02004609 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4610 slub_cpu_dead);
Christoph Lameter81819f02007-05-06 14:49:36 -07004611
Alexey Dobriyanb9726c22019-03-05 15:48:26 -08004612 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
Christoph Lameterf97d5f62013-01-10 19:12:17 +00004613 cache_line_size(),
Christoph Lameter81819f02007-05-06 14:49:36 -07004614 slub_min_order, slub_max_order, slub_min_objects,
4615 nr_cpu_ids, nr_node_ids);
4616}
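/*
 * The values printed by the pr_info() line above depend on the machine
 * and the boot parameters; a typical line looks roughly like this
 * (numbers are illustrative only):
 *
 *   SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=8, Nodes=1
 */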
4617
Pekka Enberg7e85ee02009-06-12 14:03:06 +03004618void __init kmem_cache_init_late(void)
4619{
Pekka Enberg7e85ee02009-06-12 14:03:06 +03004620}
4621
Glauber Costa2633d7a2012-12-18 14:22:34 -08004622struct kmem_cache *
Alexey Dobriyanf4957d52018-04-05 16:20:37 -07004623__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
Alexey Dobriyand50112e2017-11-15 17:32:18 -08004624 slab_flags_t flags, void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07004625{
Roman Gushchin10befea2020-08-06 23:21:27 -07004626 struct kmem_cache *s;
Christoph Lameter81819f02007-05-06 14:49:36 -07004627
Vladimir Davydova44cb9442014-04-07 15:39:23 -07004628 s = find_mergeable(size, align, flags, name, ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07004629 if (s) {
4630 s->refcount++;
Vladimir Davydov84d0ddd2014-04-07 15:39:29 -07004631
Christoph Lameter81819f02007-05-06 14:49:36 -07004632 /*
4633 * Adjust the object sizes so that we clear
4634 * the complete object on kzalloc.
4635 */
Alexey Dobriyan1b473f22018-04-05 16:21:17 -07004636 s->object_size = max(s->object_size, size);
Alexey Dobriyan52ee6d72018-04-05 16:21:06 -07004637 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
Christoph Lameter6446faa2008-02-15 23:45:26 -08004638
David Rientjes7b8f3b62008-12-17 22:09:46 -08004639 if (sysfs_slab_alias(s, name)) {
David Rientjes7b8f3b62008-12-17 22:09:46 -08004640 s->refcount--;
Christoph Lametercbb79692012-09-05 00:18:32 +00004641 s = NULL;
David Rientjes7b8f3b62008-12-17 22:09:46 -08004642 }
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07004643 }
Christoph Lameter6446faa2008-02-15 23:45:26 -08004644
Christoph Lametercbb79692012-09-05 00:18:32 +00004645 return s;
4646}
Pekka Enberg84c1cf62010-09-14 23:21:12 +03004647
Alexey Dobriyand50112e2017-11-15 17:32:18 -08004648int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
Christoph Lametercbb79692012-09-05 00:18:32 +00004649{
Pekka Enbergaac3a162012-09-05 12:07:44 +03004650 int err;
Christoph Lameter20cea962012-07-06 15:25:13 -05004651
Pekka Enbergaac3a162012-09-05 12:07:44 +03004652 err = kmem_cache_open(s, flags);
4653 if (err)
4654 return err;
Christoph Lameter20cea962012-07-06 15:25:13 -05004655
Christoph Lameter45530c42012-11-28 16:23:07 +00004656 /* Mutex is not taken during early boot */
4657 if (slab_state <= UP)
4658 return 0;
4659
Pekka Enbergaac3a162012-09-05 12:07:44 +03004660 err = sysfs_slab_add(s);
Pekka Enbergaac3a162012-09-05 12:07:44 +03004661 if (err)
Dmitry Safonov52b4b952016-02-17 13:11:37 -08004662 __kmem_cache_release(s);
Pekka Enbergaac3a162012-09-05 12:07:44 +03004663
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07004664 if (s->flags & SLAB_STORE_USER)
4665 debugfs_slab_add(s);
4666
Pekka Enbergaac3a162012-09-05 12:07:44 +03004667 return err;
Christoph Lameter81819f02007-05-06 14:49:36 -07004668}
Christoph Lameter81819f02007-05-06 14:49:36 -07004669
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004670void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07004671{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004672 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004673 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004674
Christoph Lameter95a05b42013-01-10 19:14:19 +00004675 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02004676 return kmalloc_large(size, gfpflags);
4677
Christoph Lameter2c59dd62013-01-10 19:14:19 +00004678 s = kmalloc_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07004679
Satyam Sharma2408c552007-10-16 01:24:44 -07004680 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07004681 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07004682
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08004683 ret = slab_alloc(s, gfpflags, caller, size);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004684
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004685 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004686 trace_kmalloc(caller, ret, size, s->size, gfpflags);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004687
4688 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004689}
Daniel Vetterfd7cb572020-03-23 15:49:00 +01004690EXPORT_SYMBOL(__kmalloc_track_caller);
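/*
 * __kmalloc_track_caller() is normally reached through the
 * kmalloc_track_caller() wrapper used by helpers such as kstrdup() and
 * kmemdup(), so the recorded call site is the original caller of the
 * helper rather than the helper itself.
 */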
Christoph Lameter81819f02007-05-06 14:49:36 -07004691
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09004692#ifdef CONFIG_NUMA
Christoph Lameter81819f02007-05-06 14:49:36 -07004693void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004694 int node, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07004695{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004696 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004697 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004698
Christoph Lameter95a05b42013-01-10 19:14:19 +00004699 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
Xiaotian Fengd3e14aa2010-04-08 17:26:44 +08004700 ret = kmalloc_large_node(size, gfpflags, node);
4701
4702 trace_kmalloc_node(caller, ret,
4703 size, PAGE_SIZE << get_order(size),
4704 gfpflags, node);
4705
4706 return ret;
4707 }
Pekka Enbergeada35e2008-02-11 22:47:46 +02004708
Christoph Lameter2c59dd62013-01-10 19:14:19 +00004709 s = kmalloc_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07004710
Satyam Sharma2408c552007-10-16 01:24:44 -07004711 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07004712 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07004713
Alexander Potapenkob89fb5e2021-02-25 17:19:16 -08004714 ret = slab_alloc_node(s, gfpflags, node, caller, size);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004715
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004716 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004717 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004718
4719 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004720}
Daniel Vetterfd7cb572020-03-23 15:49:00 +01004721EXPORT_SYMBOL(__kmalloc_node_track_caller);
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09004722#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07004723
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004724#ifdef CONFIG_SYSFS
Christoph Lameter205ab992008-04-14 19:11:40 +03004725static int count_inuse(struct page *page)
4726{
4727 return page->inuse;
4728}
4729
4730static int count_total(struct page *page)
4731{
4732 return page->objects;
4733}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004734#endif
Christoph Lameter205ab992008-04-14 19:11:40 +03004735
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004736#ifdef CONFIG_SLUB_DEBUG
Vlastimil Babka0a19e7d2021-05-23 01:37:07 +02004737static void validate_slab(struct kmem_cache *s, struct page *page,
4738 unsigned long *obj_map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004739{
4740 void *p;
Christoph Lametera973e9d2008-03-01 13:40:44 -08004741 void *addr = page_address(page);
Yu Zhao90e9f6a2020-01-30 22:11:57 -08004742
4743 slab_lock(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004744
Yu Zhaodd98afd2019-11-30 17:49:37 -08004745 if (!check_slab(s, page) || !on_freelist(s, page, NULL))
Yu Zhao90e9f6a2020-01-30 22:11:57 -08004746 goto unlock;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004747
4748 /* Now we know that a valid freelist exists */
Vlastimil Babka0a19e7d2021-05-23 01:37:07 +02004749 __fill_map(obj_map, s, page);
Christoph Lameter5f80b132011-04-15 14:48:13 -05004750 for_each_object(p, s, addr, page->objects) {
Vlastimil Babka0a19e7d2021-05-23 01:37:07 +02004751 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
Yu Zhaodd98afd2019-11-30 17:49:37 -08004752 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004753
Yu Zhaodd98afd2019-11-30 17:49:37 -08004754 if (!check_object(s, page, p, val))
4755 break;
4756 }
Yu Zhao90e9f6a2020-01-30 22:11:57 -08004757unlock:
Christoph Lameter881db7f2011-06-01 12:25:53 -05004758 slab_unlock(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004759}
4760
Christoph Lameter434e2452007-07-17 04:03:30 -07004761static int validate_slab_node(struct kmem_cache *s,
Vlastimil Babka0a19e7d2021-05-23 01:37:07 +02004762 struct kmem_cache_node *n, unsigned long *obj_map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004763{
4764 unsigned long count = 0;
4765 struct page *page;
4766 unsigned long flags;
4767
4768 spin_lock_irqsave(&n->list_lock, flags);
4769
Tobin C. Harding916ac052019-05-13 17:16:12 -07004770 list_for_each_entry(page, &n->partial, slab_list) {
Vlastimil Babka0a19e7d2021-05-23 01:37:07 +02004771 validate_slab(s, page, obj_map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004772 count++;
4773 }
Oliver Glitta1f9f78b2021-06-28 19:34:33 -07004774 if (count != n->nr_partial) {
Fabian Frederickf9f58282014-06-04 16:06:34 -07004775 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4776 s->name, count, n->nr_partial);
Oliver Glitta1f9f78b2021-06-28 19:34:33 -07004777 slab_add_kunit_errors();
4778 }
Christoph Lameter53e15af2007-05-06 14:49:43 -07004779
4780 if (!(s->flags & SLAB_STORE_USER))
4781 goto out;
4782
Tobin C. Harding916ac052019-05-13 17:16:12 -07004783 list_for_each_entry(page, &n->full, slab_list) {
Vlastimil Babka0a19e7d2021-05-23 01:37:07 +02004784 validate_slab(s, page, obj_map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004785 count++;
4786 }
Oliver Glitta1f9f78b2021-06-28 19:34:33 -07004787 if (count != atomic_long_read(&n->nr_slabs)) {
Fabian Frederickf9f58282014-06-04 16:06:34 -07004788 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4789 s->name, count, atomic_long_read(&n->nr_slabs));
Oliver Glitta1f9f78b2021-06-28 19:34:33 -07004790 slab_add_kunit_errors();
4791 }
Christoph Lameter53e15af2007-05-06 14:49:43 -07004792
4793out:
4794 spin_unlock_irqrestore(&n->list_lock, flags);
4795 return count;
4796}
4797
Oliver Glitta1f9f78b2021-06-28 19:34:33 -07004798long validate_slab_cache(struct kmem_cache *s)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004799{
4800 int node;
4801 unsigned long count = 0;
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004802 struct kmem_cache_node *n;
Vlastimil Babka0a19e7d2021-05-23 01:37:07 +02004803 unsigned long *obj_map;
4804
4805 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
4806 if (!obj_map)
4807 return -ENOMEM;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004808
4809 flush_all(s);
Christoph Lameterfa45dc22014-08-06 16:04:09 -07004810 for_each_kmem_cache_node(s, node, n)
Vlastimil Babka0a19e7d2021-05-23 01:37:07 +02004811 count += validate_slab_node(s, n, obj_map);
4812
4813 bitmap_free(obj_map);
Yu Zhao90e9f6a2020-01-30 22:11:57 -08004814
Christoph Lameter53e15af2007-05-06 14:49:43 -07004815 return count;
4816}
Oliver Glitta1f9f78b2021-06-28 19:34:33 -07004817EXPORT_SYMBOL(validate_slab_cache);
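/*
 * validate_slab_cache() is exported so it can be driven from test code
 * (e.g. the SLUB KUnit test).  With CONFIG_SLUB_DEBUG it can also be
 * triggered from user space through the 'validate' sysfs attribute
 * defined below, e.g. (cache name only as an example):
 *
 *   echo 1 > /sys/kernel/slab/kmalloc-64/validate
 */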
4818
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07004819#ifdef CONFIG_DEBUG_FS
Christoph Lameter88a420e2007-05-06 14:49:45 -07004820/*
Christoph Lameter672bba32007-05-09 02:32:39 -07004821 * Generate lists of code addresses where slabcache objects are allocated
Christoph Lameter88a420e2007-05-06 14:49:45 -07004822 * and freed.
4823 */
4824
4825struct location {
4826 unsigned long count;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004827 unsigned long addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004828 long long sum_time;
4829 long min_time;
4830 long max_time;
4831 long min_pid;
4832 long max_pid;
Rusty Russell174596a2009-01-01 10:12:29 +10304833 DECLARE_BITMAP(cpus, NR_CPUS);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004834 nodemask_t nodes;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004835};
4836
4837struct loc_track {
4838 unsigned long max;
4839 unsigned long count;
4840 struct location *loc;
4841};
4842
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07004843static struct dentry *slab_debugfs_root;
4844
Christoph Lameter88a420e2007-05-06 14:49:45 -07004845static void free_loc_track(struct loc_track *t)
4846{
4847 if (t->max)
4848 free_pages((unsigned long)t->loc,
4849 get_order(sizeof(struct location) * t->max));
4850}
4851
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004852static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004853{
4854 struct location *l;
4855 int order;
4856
Christoph Lameter88a420e2007-05-06 14:49:45 -07004857 order = get_order(sizeof(struct location) * max);
4858
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004859 l = (void *)__get_free_pages(flags, order);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004860 if (!l)
4861 return 0;
4862
4863 if (t->count) {
4864 memcpy(l, t->loc, sizeof(struct location) * t->count);
4865 free_loc_track(t);
4866 }
4867 t->max = max;
4868 t->loc = l;
4869 return 1;
4870}
4871
4872static int add_location(struct loc_track *t, struct kmem_cache *s,
Christoph Lameter45edfa52007-05-09 02:32:45 -07004873 const struct track *track)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004874{
4875 long start, end, pos;
4876 struct location *l;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004877 unsigned long caddr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004878 unsigned long age = jiffies - track->when;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004879
4880 start = -1;
4881 end = t->count;
4882
4883 for ( ; ; ) {
4884 pos = start + (end - start + 1) / 2;
4885
4886 /*
4887 * There is nothing at "end". If we end up there
4888 * we need to insert the new entry before end.
4889 */
4890 if (pos == end)
4891 break;
4892
4893 caddr = t->loc[pos].addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004894 if (track->addr == caddr) {
4895
4896 l = &t->loc[pos];
4897 l->count++;
4898 if (track->when) {
4899 l->sum_time += age;
4900 if (age < l->min_time)
4901 l->min_time = age;
4902 if (age > l->max_time)
4903 l->max_time = age;
4904
4905 if (track->pid < l->min_pid)
4906 l->min_pid = track->pid;
4907 if (track->pid > l->max_pid)
4908 l->max_pid = track->pid;
4909
Rusty Russell174596a2009-01-01 10:12:29 +10304910 cpumask_set_cpu(track->cpu,
4911 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004912 }
4913 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004914 return 1;
4915 }
4916
Christoph Lameter45edfa52007-05-09 02:32:45 -07004917 if (track->addr < caddr)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004918 end = pos;
4919 else
4920 start = pos;
4921 }
4922
4923 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07004924 * Not found. Insert new tracking element.
Christoph Lameter88a420e2007-05-06 14:49:45 -07004925 */
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004926 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
Christoph Lameter88a420e2007-05-06 14:49:45 -07004927 return 0;
4928
4929 l = t->loc + pos;
4930 if (pos < t->count)
4931 memmove(l + 1, l,
4932 (t->count - pos) * sizeof(struct location));
4933 t->count++;
4934 l->count = 1;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004935 l->addr = track->addr;
4936 l->sum_time = age;
4937 l->min_time = age;
4938 l->max_time = age;
4939 l->min_pid = track->pid;
4940 l->max_pid = track->pid;
Rusty Russell174596a2009-01-01 10:12:29 +10304941 cpumask_clear(to_cpumask(l->cpus));
4942 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004943 nodes_clear(l->nodes);
4944 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004945 return 1;
4946}
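/*
 * add_location() keeps t->loc sorted by track->addr: the loop above is a
 * binary search.  On a match the existing entry is updated (hit count,
 * min/max age and pid, cpu and node masks); otherwise the array is grown
 * by doubling if necessary and the new entry is inserted at 'pos' via
 * memmove().
 */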
4947
4948static void process_slab(struct loc_track *t, struct kmem_cache *s,
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02004949 struct page *page, enum track_item alloc,
4950 unsigned long *obj_map)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004951{
Christoph Lametera973e9d2008-03-01 13:40:44 -08004952 void *addr = page_address(page);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004953 void *p;
4954
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02004955 __fill_map(obj_map, s, page);
4956
Christoph Lameter224a88b2008-04-14 19:11:31 +03004957 for_each_object(p, s, addr, page->objects)
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02004958 if (!test_bit(__obj_to_index(s, addr, p), obj_map))
Christoph Lameter45edfa52007-05-09 02:32:45 -07004959 add_location(t, s, get_track(s, p, alloc));
Christoph Lameter88a420e2007-05-06 14:49:45 -07004960}
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07004961#endif /* CONFIG_DEBUG_FS */
Tobin C. Harding6dfd1b62019-05-13 17:16:09 -07004962#endif /* CONFIG_SLUB_DEBUG */
Christoph Lameter88a420e2007-05-06 14:49:45 -07004963
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004964#ifdef CONFIG_SYSFS
Christoph Lameter81819f02007-05-06 14:49:36 -07004965enum slab_stat_type {
Christoph Lameter205ab992008-04-14 19:11:40 +03004966 SL_ALL, /* All slabs */
4967 SL_PARTIAL, /* Only partially allocated slabs */
4968 SL_CPU, /* Only slabs used for cpu caches */
4969 SL_OBJECTS, /* Determine allocated objects not slabs */
4970 SL_TOTAL /* Determine object capacity not slabs */
Christoph Lameter81819f02007-05-06 14:49:36 -07004971};
4972
Christoph Lameter205ab992008-04-14 19:11:40 +03004973#define SO_ALL (1 << SL_ALL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004974#define SO_PARTIAL (1 << SL_PARTIAL)
4975#define SO_CPU (1 << SL_CPU)
4976#define SO_OBJECTS (1 << SL_OBJECTS)
Christoph Lameter205ab992008-04-14 19:11:40 +03004977#define SO_TOTAL (1 << SL_TOTAL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004978
Cyrill Gorcunov62e5c4b2008-03-02 23:28:24 +03004979static ssize_t show_slab_objects(struct kmem_cache *s,
Joe Perchesbf16d192020-12-14 19:14:57 -08004980 char *buf, unsigned long flags)
Christoph Lameter81819f02007-05-06 14:49:36 -07004981{
4982 unsigned long total = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07004983 int node;
4984 int x;
4985 unsigned long *nodes;
Joe Perchesbf16d192020-12-14 19:14:57 -08004986 int len = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07004987
Kees Cook6396bb22018-06-12 14:03:40 -07004988 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
Cyrill Gorcunov62e5c4b2008-03-02 23:28:24 +03004989 if (!nodes)
4990 return -ENOMEM;
Christoph Lameter81819f02007-05-06 14:49:36 -07004991
Christoph Lameter205ab992008-04-14 19:11:40 +03004992 if (flags & SO_CPU) {
4993 int cpu;
Christoph Lameter81819f02007-05-06 14:49:36 -07004994
Christoph Lameter205ab992008-04-14 19:11:40 +03004995 for_each_possible_cpu(cpu) {
Chen Gangd0e0ac92013-07-15 09:05:29 +08004996 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4997 cpu);
Christoph Lameterec3ab082012-05-09 10:09:56 -05004998 int node;
Christoph Lameter49e22582011-08-09 16:12:27 -05004999 struct page *page;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07005000
Jason Low4db0c3c2015-04-15 16:14:08 -07005001 page = READ_ONCE(c->page);
Christoph Lameterec3ab082012-05-09 10:09:56 -05005002 if (!page)
5003 continue;
Christoph Lameter205ab992008-04-14 19:11:40 +03005004
Christoph Lameterec3ab082012-05-09 10:09:56 -05005005 node = page_to_nid(page);
5006 if (flags & SO_TOTAL)
5007 x = page->objects;
5008 else if (flags & SO_OBJECTS)
5009 x = page->inuse;
5010 else
5011 x = 1;
Christoph Lameter49e22582011-08-09 16:12:27 -05005012
Christoph Lameterec3ab082012-05-09 10:09:56 -05005013 total += x;
5014 nodes[node] += x;
5015
Wei Yanga93cf072017-07-06 15:36:31 -07005016 page = slub_percpu_partial_read_once(c);
Christoph Lameter49e22582011-08-09 16:12:27 -05005017 if (page) {
Li Zefan8afb1472013-09-10 11:43:37 +08005018 node = page_to_nid(page);
5019 if (flags & SO_TOTAL)
5020 WARN_ON_ONCE(1);
5021 else if (flags & SO_OBJECTS)
5022 WARN_ON_ONCE(1);
5023 else
5024 x = page->pages;
Eric Dumazetbc6697d2011-11-22 16:02:02 +01005025 total += x;
5026 nodes[node] += x;
Christoph Lameter49e22582011-08-09 16:12:27 -05005027 }
Christoph Lameter81819f02007-05-06 14:49:36 -07005028 }
5029 }
5030
Qian Caie4f8e512019-10-14 14:11:51 -07005031 /*
5032 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
5033 * already held, as that would conflict with an existing lock order:
5034 *
5035 * mem_hotplug_lock->slab_mutex->kernfs_mutex
5036 *
5037 * We don't really need mem_hotplug_lock (to hold off
5038 * slab_mem_going_offline_callback) here because slab's memory hot
5039 * unplug code doesn't destroy the kmem_cache->node[] data.
5040 */
5041
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005042#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter205ab992008-04-14 19:11:40 +03005043 if (flags & SO_ALL) {
Christoph Lameterfa45dc22014-08-06 16:04:09 -07005044 struct kmem_cache_node *n;
5045
5046 for_each_kmem_cache_node(s, node, n) {
Christoph Lameter81819f02007-05-06 14:49:36 -07005047
Chen Gangd0e0ac92013-07-15 09:05:29 +08005048 if (flags & SO_TOTAL)
5049 x = atomic_long_read(&n->total_objects);
5050 else if (flags & SO_OBJECTS)
5051 x = atomic_long_read(&n->total_objects) -
5052 count_partial(n, count_free);
Christoph Lameter205ab992008-04-14 19:11:40 +03005053 else
5054 x = atomic_long_read(&n->nr_slabs);
5055 total += x;
5056 nodes[node] += x;
5057 }
5058
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005059 } else
5060#endif
5061 if (flags & SO_PARTIAL) {
Christoph Lameterfa45dc22014-08-06 16:04:09 -07005062 struct kmem_cache_node *n;
Christoph Lameter205ab992008-04-14 19:11:40 +03005063
Christoph Lameterfa45dc22014-08-06 16:04:09 -07005064 for_each_kmem_cache_node(s, node, n) {
Christoph Lameter205ab992008-04-14 19:11:40 +03005065 if (flags & SO_TOTAL)
5066 x = count_partial(n, count_total);
5067 else if (flags & SO_OBJECTS)
5068 x = count_partial(n, count_inuse);
Christoph Lameter81819f02007-05-06 14:49:36 -07005069 else
5070 x = n->nr_partial;
5071 total += x;
5072 nodes[node] += x;
5073 }
Christoph Lameter81819f02007-05-06 14:49:36 -07005074 }
Joe Perchesbf16d192020-12-14 19:14:57 -08005075
5076 len += sysfs_emit_at(buf, len, "%lu", total);
Christoph Lameter81819f02007-05-06 14:49:36 -07005077#ifdef CONFIG_NUMA
Joe Perchesbf16d192020-12-14 19:14:57 -08005078 for (node = 0; node < nr_node_ids; node++) {
Christoph Lameter81819f02007-05-06 14:49:36 -07005079 if (nodes[node])
Joe Perchesbf16d192020-12-14 19:14:57 -08005080 len += sysfs_emit_at(buf, len, " N%d=%lu",
5081 node, nodes[node]);
5082 }
Christoph Lameter81819f02007-05-06 14:49:36 -07005083#endif
Joe Perchesbf16d192020-12-14 19:14:57 -08005084 len += sysfs_emit_at(buf, len, "\n");
Christoph Lameter81819f02007-05-06 14:49:36 -07005085 kfree(nodes);
Joe Perchesbf16d192020-12-14 19:14:57 -08005086
5087 return len;
Christoph Lameter81819f02007-05-06 14:49:36 -07005088}
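/*
 * The buffer produced above is the grand total followed by optional
 * per-node counts, for example (numbers are illustrative):
 *
 *   4096 N0=2048 N1=2048
 */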
5089
Christoph Lameter81819f02007-05-06 14:49:36 -07005090#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
Phil Carmody497888c2011-07-14 15:07:13 +03005091#define to_slab(n) container_of(n, struct kmem_cache, kobj)
Christoph Lameter81819f02007-05-06 14:49:36 -07005092
5093struct slab_attribute {
5094 struct attribute attr;
5095 ssize_t (*show)(struct kmem_cache *s, char *buf);
5096 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
5097};
5098
5099#define SLAB_ATTR_RO(_name) \
Vasiliy Kulikovab067e92011-09-27 21:54:53 +04005100 static struct slab_attribute _name##_attr = \
5101 __ATTR(_name, 0400, _name##_show, NULL)
Christoph Lameter81819f02007-05-06 14:49:36 -07005102
5103#define SLAB_ATTR(_name) \
5104 static struct slab_attribute _name##_attr = \
Vasiliy Kulikovab067e92011-09-27 21:54:53 +04005105 __ATTR(_name, 0600, _name##_show, _name##_store)
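/*
 * Read-only attributes are created with mode 0400 and writable ones with
 * 0600, so slab attributes are visible and modifiable by root only.
 */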
Christoph Lameter81819f02007-05-06 14:49:36 -07005106
Christoph Lameter81819f02007-05-06 14:49:36 -07005107static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
5108{
Joe Perchesbf16d192020-12-14 19:14:57 -08005109 return sysfs_emit(buf, "%u\n", s->size);
Christoph Lameter81819f02007-05-06 14:49:36 -07005110}
5111SLAB_ATTR_RO(slab_size);
5112
5113static ssize_t align_show(struct kmem_cache *s, char *buf)
5114{
Joe Perchesbf16d192020-12-14 19:14:57 -08005115 return sysfs_emit(buf, "%u\n", s->align);
Christoph Lameter81819f02007-05-06 14:49:36 -07005116}
5117SLAB_ATTR_RO(align);
5118
5119static ssize_t object_size_show(struct kmem_cache *s, char *buf)
5120{
Joe Perchesbf16d192020-12-14 19:14:57 -08005121 return sysfs_emit(buf, "%u\n", s->object_size);
Christoph Lameter81819f02007-05-06 14:49:36 -07005122}
5123SLAB_ATTR_RO(object_size);
5124
5125static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
5126{
Joe Perchesbf16d192020-12-14 19:14:57 -08005127 return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07005128}
5129SLAB_ATTR_RO(objs_per_slab);
5130
5131static ssize_t order_show(struct kmem_cache *s, char *buf)
5132{
Joe Perchesbf16d192020-12-14 19:14:57 -08005133 return sysfs_emit(buf, "%u\n", oo_order(s->oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07005134}
Vlastimil Babka32a6f402020-08-06 23:18:41 -07005135SLAB_ATTR_RO(order);
Christoph Lameter81819f02007-05-06 14:49:36 -07005136
David Rientjes73d342b2009-02-22 17:40:09 -08005137static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
5138{
Joe Perchesbf16d192020-12-14 19:14:57 -08005139 return sysfs_emit(buf, "%lu\n", s->min_partial);
David Rientjes73d342b2009-02-22 17:40:09 -08005140}
5141
5142static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
5143 size_t length)
5144{
5145 unsigned long min;
5146 int err;
5147
Jingoo Han3dbb95f2013-09-11 14:20:25 -07005148 err = kstrtoul(buf, 10, &min);
David Rientjes73d342b2009-02-22 17:40:09 -08005149 if (err)
5150 return err;
5151
David Rientjesc0bdb232009-02-25 09:16:35 +02005152 set_min_partial(s, min);
David Rientjes73d342b2009-02-22 17:40:09 -08005153 return length;
5154}
5155SLAB_ATTR(min_partial);
5156
Christoph Lameter49e22582011-08-09 16:12:27 -05005157static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
5158{
Joe Perchesbf16d192020-12-14 19:14:57 -08005159 return sysfs_emit(buf, "%u\n", slub_cpu_partial(s));
Christoph Lameter49e22582011-08-09 16:12:27 -05005160}
5161
5162static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
5163 size_t length)
5164{
Alexey Dobriyane5d99982018-04-05 16:21:10 -07005165 unsigned int objects;
Christoph Lameter49e22582011-08-09 16:12:27 -05005166 int err;
5167
Alexey Dobriyane5d99982018-04-05 16:21:10 -07005168 err = kstrtouint(buf, 10, &objects);
Christoph Lameter49e22582011-08-09 16:12:27 -05005169 if (err)
5170 return err;
Joonsoo Kim345c9052013-06-19 14:05:52 +09005171 if (objects && !kmem_cache_has_cpu_partial(s))
David Rientjes74ee4ef2012-01-09 13:19:45 -08005172 return -EINVAL;
Christoph Lameter49e22582011-08-09 16:12:27 -05005173
Wei Yange6d0e1d2017-07-06 15:36:34 -07005174 slub_set_cpu_partial(s, objects);
Christoph Lameter49e22582011-08-09 16:12:27 -05005175 flush_all(s);
5176 return length;
5177}
5178SLAB_ATTR(cpu_partial);
5179
Christoph Lameter81819f02007-05-06 14:49:36 -07005180static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5181{
Joe Perches62c70bc2011-01-13 15:45:52 -08005182 if (!s->ctor)
5183 return 0;
Joe Perchesbf16d192020-12-14 19:14:57 -08005184 return sysfs_emit(buf, "%pS\n", s->ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07005185}
5186SLAB_ATTR_RO(ctor);
5187
Christoph Lameter81819f02007-05-06 14:49:36 -07005188static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5189{
Joe Perchesbf16d192020-12-14 19:14:57 -08005190 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07005191}
5192SLAB_ATTR_RO(aliases);
5193
Christoph Lameter81819f02007-05-06 14:49:36 -07005194static ssize_t partial_show(struct kmem_cache *s, char *buf)
5195{
Christoph Lameterd9acf4b2008-02-15 15:22:21 -08005196 return show_slab_objects(s, buf, SO_PARTIAL);
Christoph Lameter81819f02007-05-06 14:49:36 -07005197}
5198SLAB_ATTR_RO(partial);
5199
5200static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5201{
Christoph Lameterd9acf4b2008-02-15 15:22:21 -08005202 return show_slab_objects(s, buf, SO_CPU);
Christoph Lameter81819f02007-05-06 14:49:36 -07005203}
5204SLAB_ATTR_RO(cpu_slabs);
5205
5206static ssize_t objects_show(struct kmem_cache *s, char *buf)
5207{
Christoph Lameter205ab992008-04-14 19:11:40 +03005208 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
Christoph Lameter81819f02007-05-06 14:49:36 -07005209}
5210SLAB_ATTR_RO(objects);
5211
Christoph Lameter205ab992008-04-14 19:11:40 +03005212static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5213{
5214 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5215}
5216SLAB_ATTR_RO(objects_partial);
5217
Christoph Lameter49e22582011-08-09 16:12:27 -05005218static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5219{
5220 int objects = 0;
5221 int pages = 0;
5222 int cpu;
Joe Perchesbf16d192020-12-14 19:14:57 -08005223 int len = 0;
Christoph Lameter49e22582011-08-09 16:12:27 -05005224
5225 for_each_online_cpu(cpu) {
Wei Yanga93cf072017-07-06 15:36:31 -07005226 struct page *page;
5227
5228 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
Christoph Lameter49e22582011-08-09 16:12:27 -05005229
5230 if (page) {
5231 pages += page->pages;
5232 objects += page->pobjects;
5233 }
5234 }
5235
Joe Perchesbf16d192020-12-14 19:14:57 -08005236 len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages);
Christoph Lameter49e22582011-08-09 16:12:27 -05005237
5238#ifdef CONFIG_SMP
5239 for_each_online_cpu(cpu) {
Wei Yanga93cf072017-07-06 15:36:31 -07005240 struct page *page;
5241
5242 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
Joe Perchesbf16d192020-12-14 19:14:57 -08005243 if (page)
5244 len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
5245 cpu, page->pobjects, page->pages);
Christoph Lameter49e22582011-08-09 16:12:27 -05005246 }
5247#endif
Joe Perchesbf16d192020-12-14 19:14:57 -08005248 len += sysfs_emit_at(buf, len, "\n");
5249
5250 return len;
Christoph Lameter49e22582011-08-09 16:12:27 -05005251}
5252SLAB_ATTR_RO(slabs_cpu_partial);
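/*
 * Example of the format emitted above (numbers are illustrative): the
 * aggregate "objects(pages)" followed by the non-empty per-cpu partial
 * lists:
 *
 *   183(10) C0=21(1) C1=45(3) C3=117(6)
 */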
5253
Christoph Lameter81819f02007-05-06 14:49:36 -07005254static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5255{
Joe Perchesbf16d192020-12-14 19:14:57 -08005256 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
Christoph Lameter81819f02007-05-06 14:49:36 -07005257}
Vlastimil Babka8f58119a2020-08-06 23:18:48 -07005258SLAB_ATTR_RO(reclaim_account);
Christoph Lameter81819f02007-05-06 14:49:36 -07005259
5260static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5261{
Joe Perchesbf16d192020-12-14 19:14:57 -08005262 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
Christoph Lameter81819f02007-05-06 14:49:36 -07005263}
5264SLAB_ATTR_RO(hwcache_align);
5265
5266#ifdef CONFIG_ZONE_DMA
5267static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5268{
Joe Perchesbf16d192020-12-14 19:14:57 -08005269 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
Christoph Lameter81819f02007-05-06 14:49:36 -07005270}
5271SLAB_ATTR_RO(cache_dma);
5272#endif
5273
David Windsor8eb82842017-06-10 22:50:28 -04005274static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5275{
Joe Perchesbf16d192020-12-14 19:14:57 -08005276 return sysfs_emit(buf, "%u\n", s->usersize);
David Windsor8eb82842017-06-10 22:50:28 -04005277}
5278SLAB_ATTR_RO(usersize);
5279
Christoph Lameter81819f02007-05-06 14:49:36 -07005280static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5281{
Joe Perchesbf16d192020-12-14 19:14:57 -08005282 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
Christoph Lameter81819f02007-05-06 14:49:36 -07005283}
5284SLAB_ATTR_RO(destroy_by_rcu);
5285
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005286#ifdef CONFIG_SLUB_DEBUG
Christoph Lametera5a84752010-10-05 13:57:27 -05005287static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5288{
5289 return show_slab_objects(s, buf, SO_ALL);
5290}
5291SLAB_ATTR_RO(slabs);
5292
5293static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5294{
5295 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5296}
5297SLAB_ATTR_RO(total_objects);
5298
5299static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5300{
Joe Perchesbf16d192020-12-14 19:14:57 -08005301 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
Christoph Lametera5a84752010-10-05 13:57:27 -05005302}
Vlastimil Babka060807f2020-08-06 23:18:45 -07005303SLAB_ATTR_RO(sanity_checks);
Christoph Lametera5a84752010-10-05 13:57:27 -05005304
5305static ssize_t trace_show(struct kmem_cache *s, char *buf)
5306{
Joe Perchesbf16d192020-12-14 19:14:57 -08005307 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
Christoph Lametera5a84752010-10-05 13:57:27 -05005308}
Vlastimil Babka060807f2020-08-06 23:18:45 -07005309SLAB_ATTR_RO(trace);
Christoph Lametera5a84752010-10-05 13:57:27 -05005310
Christoph Lameter81819f02007-05-06 14:49:36 -07005311static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5312{
Joe Perchesbf16d192020-12-14 19:14:57 -08005313 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
Christoph Lameter81819f02007-05-06 14:49:36 -07005314}
5315
Vlastimil Babkaad38b5b2020-08-06 23:18:38 -07005316SLAB_ATTR_RO(red_zone);
Christoph Lameter81819f02007-05-06 14:49:36 -07005317
5318static ssize_t poison_show(struct kmem_cache *s, char *buf)
5319{
Joe Perchesbf16d192020-12-14 19:14:57 -08005320 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
Christoph Lameter81819f02007-05-06 14:49:36 -07005321}
5322
Vlastimil Babkaad38b5b2020-08-06 23:18:38 -07005323SLAB_ATTR_RO(poison);
Christoph Lameter81819f02007-05-06 14:49:36 -07005324
5325static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5326{
Joe Perchesbf16d192020-12-14 19:14:57 -08005327 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
Christoph Lameter81819f02007-05-06 14:49:36 -07005328}
5329
Vlastimil Babkaad38b5b2020-08-06 23:18:38 -07005330SLAB_ATTR_RO(store_user);
Christoph Lameter81819f02007-05-06 14:49:36 -07005331
Christoph Lameter53e15af2007-05-06 14:49:43 -07005332static ssize_t validate_show(struct kmem_cache *s, char *buf)
5333{
5334 return 0;
5335}
5336
5337static ssize_t validate_store(struct kmem_cache *s,
5338 const char *buf, size_t length)
5339{
Christoph Lameter434e2452007-07-17 04:03:30 -07005340 int ret = -EINVAL;
5341
5342 if (buf[0] == '1') {
5343 ret = validate_slab_cache(s);
5344 if (ret >= 0)
5345 ret = length;
5346 }
5347 return ret;
Christoph Lameter53e15af2007-05-06 14:49:43 -07005348}
5349SLAB_ATTR(validate);
Christoph Lametera5a84752010-10-05 13:57:27 -05005350
Christoph Lametera5a84752010-10-05 13:57:27 -05005351#endif /* CONFIG_SLUB_DEBUG */
5352
5353#ifdef CONFIG_FAILSLAB
5354static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5355{
Joe Perchesbf16d192020-12-14 19:14:57 -08005356 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
Christoph Lametera5a84752010-10-05 13:57:27 -05005357}
Vlastimil Babka060807f2020-08-06 23:18:45 -07005358SLAB_ATTR_RO(failslab);
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005359#endif
Christoph Lameter53e15af2007-05-06 14:49:43 -07005360
Christoph Lameter2086d262007-05-06 14:49:46 -07005361static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5362{
5363 return 0;
5364}
5365
5366static ssize_t shrink_store(struct kmem_cache *s,
5367 const char *buf, size_t length)
5368{
Vladimir Davydov832f37f2015-02-12 14:59:41 -08005369 if (buf[0] == '1')
Roman Gushchin10befea2020-08-06 23:21:27 -07005370 kmem_cache_shrink(s);
Vladimir Davydov832f37f2015-02-12 14:59:41 -08005371 else
Christoph Lameter2086d262007-05-06 14:49:46 -07005372 return -EINVAL;
5373 return length;
5374}
5375SLAB_ATTR(shrink);
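/*
 * Writing '1' to the 'shrink' attribute runs kmem_cache_shrink() on the
 * cache; any other value is rejected with -EINVAL.  Example (cache name
 * is illustrative):
 *
 *   echo 1 > /sys/kernel/slab/dentry/shrink
 */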
5376
Christoph Lameter81819f02007-05-06 14:49:36 -07005377#ifdef CONFIG_NUMA
Christoph Lameter98246012008-01-07 23:20:26 -08005378static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
Christoph Lameter81819f02007-05-06 14:49:36 -07005379{
Joe Perchesbf16d192020-12-14 19:14:57 -08005380 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
Christoph Lameter81819f02007-05-06 14:49:36 -07005381}
5382
Christoph Lameter98246012008-01-07 23:20:26 -08005383static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
Christoph Lameter81819f02007-05-06 14:49:36 -07005384 const char *buf, size_t length)
5385{
Alexey Dobriyaneb7235e2018-04-05 16:20:48 -07005386 unsigned int ratio;
Christoph Lameter0121c6192008-04-29 16:11:12 -07005387 int err;
Christoph Lameter81819f02007-05-06 14:49:36 -07005388
Alexey Dobriyaneb7235e2018-04-05 16:20:48 -07005389 err = kstrtouint(buf, 10, &ratio);
Christoph Lameter0121c6192008-04-29 16:11:12 -07005390 if (err)
5391 return err;
Alexey Dobriyaneb7235e2018-04-05 16:20:48 -07005392 if (ratio > 100)
5393 return -ERANGE;
Christoph Lameter0121c6192008-04-29 16:11:12 -07005394
Alexey Dobriyaneb7235e2018-04-05 16:20:48 -07005395 s->remote_node_defrag_ratio = ratio * 10;
Christoph Lameter0121c6192008-04-29 16:11:12 -07005396
Christoph Lameter81819f02007-05-06 14:49:36 -07005397 return length;
5398}
Christoph Lameter98246012008-01-07 23:20:26 -08005399SLAB_ATTR(remote_node_defrag_ratio);
Christoph Lameter81819f02007-05-06 14:49:36 -07005400#endif
5401
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005402#ifdef CONFIG_SLUB_STATS
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005403static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5404{
5405 unsigned long sum = 0;
5406 int cpu;
Joe Perchesbf16d192020-12-14 19:14:57 -08005407 int len = 0;
Kees Cook6da2ec52018-06-12 13:55:00 -07005408 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005409
5410 if (!data)
5411 return -ENOMEM;
5412
5413 for_each_online_cpu(cpu) {
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06005414 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005415
5416 data[cpu] = x;
5417 sum += x;
5418 }
5419
Joe Perchesbf16d192020-12-14 19:14:57 -08005420 len += sysfs_emit_at(buf, len, "%lu", sum);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005421
Christoph Lameter50ef37b2008-04-14 18:52:05 +03005422#ifdef CONFIG_SMP
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005423 for_each_online_cpu(cpu) {
Joe Perchesbf16d192020-12-14 19:14:57 -08005424 if (data[cpu])
5425 len += sysfs_emit_at(buf, len, " C%d=%u",
5426 cpu, data[cpu]);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005427 }
Christoph Lameter50ef37b2008-04-14 18:52:05 +03005428#endif
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005429 kfree(data);
Joe Perchesbf16d192020-12-14 19:14:57 -08005430 len += sysfs_emit_at(buf, len, "\n");
5431
5432 return len;
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005433}
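/*
 * A statistics file thus contains the sum over all online cpus followed
 * by the non-zero per-cpu counts, e.g. (illustrative values):
 *
 *   8423 C0=2103 C1=1998 C2=2160 C3=2162
 *
 * Writing '0' to the same file clears the counters (see STAT_ATTR below).
 */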
5434
David Rientjes78eb00c2009-10-15 02:20:22 -07005435static void clear_stat(struct kmem_cache *s, enum stat_item si)
5436{
5437 int cpu;
5438
5439 for_each_online_cpu(cpu)
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06005440 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
David Rientjes78eb00c2009-10-15 02:20:22 -07005441}
5442
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005443#define STAT_ATTR(si, text) \
5444static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5445{ \
5446 return show_stat(s, buf, si); \
5447} \
David Rientjes78eb00c2009-10-15 02:20:22 -07005448static ssize_t text##_store(struct kmem_cache *s, \
5449 const char *buf, size_t length) \
5450{ \
5451 if (buf[0] != '0') \
5452 return -EINVAL; \
5453 clear_stat(s, si); \
5454 return length; \
5455} \
5456SLAB_ATTR(text); \
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005457
5458STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5459STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5460STAT_ATTR(FREE_FASTPATH, free_fastpath);
5461STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5462STAT_ATTR(FREE_FROZEN, free_frozen);
5463STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5464STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5465STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5466STAT_ATTR(ALLOC_SLAB, alloc_slab);
5467STAT_ATTR(ALLOC_REFILL, alloc_refill);
Christoph Lametere36a2652011-06-01 12:25:57 -05005468STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005469STAT_ATTR(FREE_SLAB, free_slab);
5470STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5471STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5472STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5473STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5474STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5475STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
Christoph Lameter03e404a2011-06-01 12:25:58 -05005476STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
Christoph Lameter65c33762008-04-14 19:11:40 +03005477STAT_ATTR(ORDER_FALLBACK, order_fallback);
Christoph Lameterb789ef52011-06-01 12:25:49 -05005478STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5479STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
Christoph Lameter49e22582011-08-09 16:12:27 -05005480STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5481STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
Alex Shi8028dce2012-02-03 23:34:56 +08005482STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5483STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
Tobin C. Harding6dfd1b62019-05-13 17:16:09 -07005484#endif /* CONFIG_SLUB_STATS */
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005485
Pekka Enberg06428782008-01-07 23:20:27 -08005486static struct attribute *slab_attrs[] = {
Christoph Lameter81819f02007-05-06 14:49:36 -07005487 &slab_size_attr.attr,
5488 &object_size_attr.attr,
5489 &objs_per_slab_attr.attr,
5490 &order_attr.attr,
David Rientjes73d342b2009-02-22 17:40:09 -08005491 &min_partial_attr.attr,
Christoph Lameter49e22582011-08-09 16:12:27 -05005492 &cpu_partial_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005493 &objects_attr.attr,
Christoph Lameter205ab992008-04-14 19:11:40 +03005494 &objects_partial_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005495 &partial_attr.attr,
5496 &cpu_slabs_attr.attr,
5497 &ctor_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005498 &aliases_attr.attr,
5499 &align_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005500 &hwcache_align_attr.attr,
5501 &reclaim_account_attr.attr,
5502 &destroy_by_rcu_attr.attr,
Christoph Lametera5a84752010-10-05 13:57:27 -05005503 &shrink_attr.attr,
Christoph Lameter49e22582011-08-09 16:12:27 -05005504 &slabs_cpu_partial_attr.attr,
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005505#ifdef CONFIG_SLUB_DEBUG
Christoph Lametera5a84752010-10-05 13:57:27 -05005506 &total_objects_attr.attr,
5507 &slabs_attr.attr,
5508 &sanity_checks_attr.attr,
5509 &trace_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005510 &red_zone_attr.attr,
5511 &poison_attr.attr,
5512 &store_user_attr.attr,
Christoph Lameter53e15af2007-05-06 14:49:43 -07005513 &validate_attr.attr,
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005514#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07005515#ifdef CONFIG_ZONE_DMA
5516 &cache_dma_attr.attr,
5517#endif
5518#ifdef CONFIG_NUMA
Christoph Lameter98246012008-01-07 23:20:26 -08005519 &remote_node_defrag_ratio_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005520#endif
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005521#ifdef CONFIG_SLUB_STATS
5522 &alloc_fastpath_attr.attr,
5523 &alloc_slowpath_attr.attr,
5524 &free_fastpath_attr.attr,
5525 &free_slowpath_attr.attr,
5526 &free_frozen_attr.attr,
5527 &free_add_partial_attr.attr,
5528 &free_remove_partial_attr.attr,
5529 &alloc_from_partial_attr.attr,
5530 &alloc_slab_attr.attr,
5531 &alloc_refill_attr.attr,
Christoph Lametere36a2652011-06-01 12:25:57 -05005532 &alloc_node_mismatch_attr.attr,
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005533 &free_slab_attr.attr,
5534 &cpuslab_flush_attr.attr,
5535 &deactivate_full_attr.attr,
5536 &deactivate_empty_attr.attr,
5537 &deactivate_to_head_attr.attr,
5538 &deactivate_to_tail_attr.attr,
5539 &deactivate_remote_frees_attr.attr,
Christoph Lameter03e404a2011-06-01 12:25:58 -05005540 &deactivate_bypass_attr.attr,
Christoph Lameter65c33762008-04-14 19:11:40 +03005541 &order_fallback_attr.attr,
Christoph Lameterb789ef52011-06-01 12:25:49 -05005542 &cmpxchg_double_fail_attr.attr,
5543 &cmpxchg_double_cpu_fail_attr.attr,
Christoph Lameter49e22582011-08-09 16:12:27 -05005544 &cpu_partial_alloc_attr.attr,
5545 &cpu_partial_free_attr.attr,
Alex Shi8028dce2012-02-03 23:34:56 +08005546 &cpu_partial_node_attr.attr,
5547 &cpu_partial_drain_attr.attr,
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005548#endif
Dmitry Monakhov4c13dd32010-02-26 09:36:12 +03005549#ifdef CONFIG_FAILSLAB
5550 &failslab_attr.attr,
5551#endif
David Windsor8eb82842017-06-10 22:50:28 -04005552 &usersize_attr.attr,
Dmitry Monakhov4c13dd32010-02-26 09:36:12 +03005553
Christoph Lameter81819f02007-05-06 14:49:36 -07005554 NULL
5555};
5556
Arvind Yadav1fdaaa22017-09-06 16:21:56 -07005557static const struct attribute_group slab_attr_group = {
Christoph Lameter81819f02007-05-06 14:49:36 -07005558 .attrs = slab_attrs,
5559};
5560
5561static ssize_t slab_attr_show(struct kobject *kobj,
5562 struct attribute *attr,
5563 char *buf)
5564{
5565 struct slab_attribute *attribute;
5566 struct kmem_cache *s;
5567 int err;
5568
5569 attribute = to_slab_attr(attr);
5570 s = to_slab(kobj);
5571
5572 if (!attribute->show)
5573 return -EIO;
5574
5575 err = attribute->show(s, buf);
5576
5577 return err;
5578}
5579
5580static ssize_t slab_attr_store(struct kobject *kobj,
5581 struct attribute *attr,
5582 const char *buf, size_t len)
5583{
5584 struct slab_attribute *attribute;
5585 struct kmem_cache *s;
5586 int err;
5587
5588 attribute = to_slab_attr(attr);
5589 s = to_slab(kobj);
5590
5591 if (!attribute->store)
5592 return -EIO;
5593
5594 err = attribute->store(s, buf, len);
Christoph Lameter81819f02007-05-06 14:49:36 -07005595 return err;
5596}
5597
Christoph Lameter41a21282014-05-06 12:50:08 -07005598static void kmem_cache_release(struct kobject *k)
5599{
5600 slab_kmem_cache_release(to_slab(k));
5601}
5602
Emese Revfy52cf25d2010-01-19 02:58:23 +01005603static const struct sysfs_ops slab_sysfs_ops = {
Christoph Lameter81819f02007-05-06 14:49:36 -07005604 .show = slab_attr_show,
5605 .store = slab_attr_store,
5606};
5607
5608static struct kobj_type slab_ktype = {
5609 .sysfs_ops = &slab_sysfs_ops,
Christoph Lameter41a21282014-05-06 12:50:08 -07005610 .release = kmem_cache_release,
Christoph Lameter81819f02007-05-06 14:49:36 -07005611};
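/*
 * slab_ktype ties the two halves together: slab_sysfs_ops dispatches the
 * show/store calls above, and kmem_cache_release() runs once the last
 * reference to the cache's kobject is dropped, freeing the kmem_cache
 * itself through slab_kmem_cache_release().
 */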
5612
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005613static struct kset *slab_kset;
Christoph Lameter81819f02007-05-06 14:49:36 -07005614
Vladimir Davydov9a417072014-04-07 15:39:31 -07005615static inline struct kset *cache_kset(struct kmem_cache *s)
5616{
Vladimir Davydov9a417072014-04-07 15:39:31 -07005617 return slab_kset;
5618}
5619
Christoph Lameter81819f02007-05-06 14:49:36 -07005620#define ID_STR_LENGTH 64
5621
5622/* Create a unique string id for a slab cache:
Christoph Lameter6446faa2008-02-15 23:45:26 -08005623 *
5624 * Format :[flags-]size
Christoph Lameter81819f02007-05-06 14:49:36 -07005625 */
5626static char *create_unique_id(struct kmem_cache *s)
5627{
5628 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5629 char *p = name;
5630
5631 BUG_ON(!name);
5632
5633 *p++ = ':';
5634 /*
5635 * First flags affecting slabcache operations. We will only
5636 * get here for aliasable slabs, so we do not need to support
5637 * too many flags. The flags here must cover all flags that
5638 * are matched during merging to guarantee that the id is
5639 * unique.
5640 */
5641 if (s->flags & SLAB_CACHE_DMA)
5642 *p++ = 'd';
Nicolas Boichat6d6ea1e2019-03-28 20:43:42 -07005643 if (s->flags & SLAB_CACHE_DMA32)
5644 *p++ = 'D';
Christoph Lameter81819f02007-05-06 14:49:36 -07005645 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5646 *p++ = 'a';
Laura Abbottbecfda62016-03-15 14:55:06 -07005647 if (s->flags & SLAB_CONSISTENCY_CHECKS)
Christoph Lameter81819f02007-05-06 14:49:36 -07005648 *p++ = 'F';
Vladimir Davydov230e9fc2016-01-14 15:18:15 -08005649 if (s->flags & SLAB_ACCOUNT)
5650 *p++ = 'A';
Christoph Lameter81819f02007-05-06 14:49:36 -07005651 if (p != name + 1)
5652 *p++ = '-';
Alexey Dobriyan44065b22018-04-05 16:21:20 -07005653 p += sprintf(p, "%07u", s->size);
Glauber Costa2633d7a2012-12-18 14:22:34 -08005654
Christoph Lameter81819f02007-05-06 14:49:36 -07005655 BUG_ON(p > name + ID_STR_LENGTH - 1);
5656 return name;
5657}
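/*
 * Example ids produced above (illustrative only): a 192 byte SLAB_ACCOUNT
 * cache yields ":A-0000192", while a plain 4096 byte cache with no
 * matching flags yields ":0004096".
 */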
5658
5659static int sysfs_slab_add(struct kmem_cache *s)
5660{
5661 int err;
5662 const char *name;
Tejun Heo1663f262017-02-22 15:41:39 -08005663 struct kset *kset = cache_kset(s);
Christoph Lameter45530c42012-11-28 16:23:07 +00005664 int unmergeable = slab_unmergeable(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07005665
Tejun Heo1663f262017-02-22 15:41:39 -08005666 if (!kset) {
5667 kobject_init(&s->kobj, &slab_ktype);
5668 return 0;
5669 }
5670
Miles Chen11066382017-11-15 17:32:25 -08005671 if (!unmergeable && disable_higher_order_debug &&
5672 (slub_debug & DEBUG_METADATA_FLAGS))
5673 unmergeable = 1;
5674
Christoph Lameter81819f02007-05-06 14:49:36 -07005675 if (unmergeable) {
5676 /*
5677 * Slabcache can never be merged so we can use the name proper.
5678 * Slabcache can never be merged, so we can use the name proper.
5679 * case we can catch duplicate names easily.
5680 */
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005681 sysfs_remove_link(&slab_kset->kobj, s->name);
Christoph Lameter81819f02007-05-06 14:49:36 -07005682 name = s->name;
5683 } else {
5684 /*
5685 * Create a unique name for the slab as a target
5686 * for the symlinks.
5687 */
5688 name = create_unique_id(s);
5689 }
5690
Tejun Heo1663f262017-02-22 15:41:39 -08005691 s->kobj.kset = kset;
Tetsuo Handa26e4f202014-01-04 16:32:31 +09005692 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
Wang Hai757fed12021-01-28 19:32:50 +08005693 if (err)
Konstantin Khlebnikov80da0262015-09-04 15:45:51 -07005694 goto out;
Christoph Lameter81819f02007-05-06 14:49:36 -07005695
5696 err = sysfs_create_group(&s->kobj, &slab_attr_group);
Dave Jones54b6a732014-04-07 15:39:32 -07005697 if (err)
5698 goto out_del_kobj;
Vladimir Davydov9a417072014-04-07 15:39:31 -07005699
Christoph Lameter81819f02007-05-06 14:49:36 -07005700 if (!unmergeable) {
5701 /* Setup first alias */
5702 sysfs_slab_alias(s, s->name);
Christoph Lameter81819f02007-05-06 14:49:36 -07005703 }
Dave Jones54b6a732014-04-07 15:39:32 -07005704out:
5705 if (!unmergeable)
5706 kfree(name);
5707 return err;
5708out_del_kobj:
5709 kobject_del(&s->kobj);
Dave Jones54b6a732014-04-07 15:39:32 -07005710 goto out;
Christoph Lameter81819f02007-05-06 14:49:36 -07005711}
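/*
 * For mergeable caches the kobject is registered under the unique id
 * generated above, and the human readable cache name only appears as a
 * symlink created by sysfs_slab_alias(); unmergeable (typically debug)
 * caches use their real name directly, which also catches duplicates.
 */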
5712
Mikulas Patockad50d82f2018-06-27 23:26:09 -07005713void sysfs_slab_unlink(struct kmem_cache *s)
5714{
5715 if (slab_state >= FULL)
5716 kobject_del(&s->kobj);
5717}
5718
Tejun Heobf5eb3d2017-02-22 15:41:11 -08005719void sysfs_slab_release(struct kmem_cache *s)
5720{
5721 if (slab_state >= FULL)
5722 kobject_put(&s->kobj);
Christoph Lameter81819f02007-05-06 14:49:36 -07005723}
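/*
 * sysfs_slab_unlink() removes the directory from sysfs right away, while
 * sysfs_slab_release() only drops the reference; the kmem_cache is not
 * freed until kmem_cache_release() runs for the final kobject_put().
 */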
5724
5725/*
5726 * Need to buffer aliases during bootup until sysfs becomes
Nick Andrew9f6c708e2008-12-05 14:08:08 +11005727 * available lest we lose that information.
Christoph Lameter81819f02007-05-06 14:49:36 -07005728 */
5729struct saved_alias {
5730 struct kmem_cache *s;
5731 const char *name;
5732 struct saved_alias *next;
5733};
5734
Adrian Bunk5af328a2007-07-17 04:03:27 -07005735static struct saved_alias *alias_list;
Christoph Lameter81819f02007-05-06 14:49:36 -07005736
5737static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5738{
5739 struct saved_alias *al;
5740
Christoph Lameter97d06602012-07-06 15:25:11 -05005741 if (slab_state == FULL) {
Christoph Lameter81819f02007-05-06 14:49:36 -07005742 /*
5743 * If we have a leftover link then remove it.
5744 */
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005745 sysfs_remove_link(&slab_kset->kobj, name);
5746 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
Christoph Lameter81819f02007-05-06 14:49:36 -07005747 }
5748
5749 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5750 if (!al)
5751 return -ENOMEM;
5752
5753 al->s = s;
5754 al->name = name;
5755 al->next = alias_list;
5756 alias_list = al;
5757 return 0;
5758}
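/*
 * Until slab_state reaches FULL the alias is only queued on alias_list;
 * slab_sysfs_init() below replays the queued entries once the "slab"
 * kset exists, so aliases created during early boot are not lost.
 */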
5759
5760static int __init slab_sysfs_init(void)
5761{
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07005762 struct kmem_cache *s;
Christoph Lameter81819f02007-05-06 14:49:36 -07005763 int err;
5764
Christoph Lameter18004c52012-07-06 15:25:12 -05005765 mutex_lock(&slab_mutex);
Christoph Lameter2bce6482010-07-19 11:39:11 -05005766
Christoph Lameterd7660ce2020-06-01 21:45:50 -07005767 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005768 if (!slab_kset) {
Christoph Lameter18004c52012-07-06 15:25:12 -05005769 mutex_unlock(&slab_mutex);
Fabian Frederickf9f58282014-06-04 16:06:34 -07005770 pr_err("Cannot register slab subsystem.\n");
Christoph Lameter81819f02007-05-06 14:49:36 -07005771 return -ENOSYS;
5772 }
5773
Christoph Lameter97d06602012-07-06 15:25:11 -05005774 slab_state = FULL;
Christoph Lameter26a7bd02007-05-09 02:32:39 -07005775
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07005776 list_for_each_entry(s, &slab_caches, list) {
Christoph Lameter26a7bd02007-05-09 02:32:39 -07005777 err = sysfs_slab_add(s);
Christoph Lameter5d540fb2007-08-30 23:56:26 -07005778 if (err)
Fabian Frederickf9f58282014-06-04 16:06:34 -07005779 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5780 s->name);
Christoph Lameter26a7bd02007-05-09 02:32:39 -07005781 }
Christoph Lameter81819f02007-05-06 14:49:36 -07005782
5783 while (alias_list) {
5784 struct saved_alias *al = alias_list;
5785
5786 alias_list = alias_list->next;
5787 err = sysfs_slab_alias(al->s, al->name);
Christoph Lameter5d540fb2007-08-30 23:56:26 -07005788 if (err)
Fabian Frederickf9f58282014-06-04 16:06:34 -07005789 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5790 al->name);
Christoph Lameter81819f02007-05-06 14:49:36 -07005791 kfree(al);
5792 }
5793
Christoph Lameter18004c52012-07-06 15:25:12 -05005794 mutex_unlock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07005795 return 0;
5796}
5797
5798__initcall(slab_sysfs_init);
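/*
 * After this initcall every cache is visible from userspace; for example
 * "ls /sys/kernel/slab/" lists all caches and, with CONFIG_SLUB_STATS,
 * "cat /sys/kernel/slab/dentry/alloc_fastpath" reads one counter
 * (the cache name is used here purely as an example).
 */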
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005799#endif /* CONFIG_SYSFS */
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005800
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07005801#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
5802static int slab_debugfs_show(struct seq_file *seq, void *v)
5803{
5805 struct location *l;
	/*
	 * v points at the loff_t cursor handed out by ->start()/->next();
	 * read it as a loff_t and narrow it instead of type-punning the
	 * pointer, which picks up the wrong half on 64-bit big-endian.
	 */
	unsigned int idx = (unsigned int)*(loff_t *)v;
5807 struct loc_track *t = seq->private;
5808
5809 if (idx < t->count) {
5810 l = &t->loc[idx];
5811
5812 seq_printf(seq, "%7ld ", l->count);
5813
5814 if (l->addr)
5815 seq_printf(seq, "%pS", (void *)l->addr);
5816 else
5817 seq_puts(seq, "<not-available>");
5818
5819 if (l->sum_time != l->min_time) {
5820 seq_printf(seq, " age=%ld/%llu/%ld",
5821 l->min_time, div_u64(l->sum_time, l->count),
5822 l->max_time);
5823 } else
5824 seq_printf(seq, " age=%ld", l->min_time);
5825
5826 if (l->min_pid != l->max_pid)
5827 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
5828 else
5829 seq_printf(seq, " pid=%ld",
5830 l->min_pid);
5831
5832 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
5833 seq_printf(seq, " cpus=%*pbl",
5834 cpumask_pr_args(to_cpumask(l->cpus)));
5835
5836 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
5837 seq_printf(seq, " nodes=%*pbl",
5838 nodemask_pr_args(&l->nodes));
5839
5840 seq_puts(seq, "\n");
5841 }
5842
5843 if (!idx && !t->count)
5844 seq_puts(seq, "No data\n");
5845
5846 return 0;
5847}
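/*
 * Each line printed above has the form
 *   <count> <call site> age=<min>/<avg>/<max> pid=<min>-<max> cpus=<mask> nodes=<mask>
 * The age and pid fields collapse to a single value when there is only one
 * distinct sample, the cpu/node masks are omitted on single cpu/node
 * systems or when empty, and "<not-available>" stands in for a missing
 * call site.
 */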
5848
5849static void slab_debugfs_stop(struct seq_file *seq, void *v)
5850{
5851}
5852
5853static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
5854{
5855 struct loc_track *t = seq->private;
5856
5857 v = ppos;
5858 ++*ppos;
5859 if (*ppos <= t->count)
5860 return v;
5861
5862 return NULL;
5863}
5864
5865static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
5866{
5867 return ppos;
5868}
5869
5870static const struct seq_operations slab_debugfs_sops = {
5871 .start = slab_debugfs_start,
5872 .next = slab_debugfs_next,
5873 .stop = slab_debugfs_stop,
5874 .show = slab_debugfs_show,
5875};
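/*
 * The seq_file iterator is intentionally trivial: start()/next() hand the
 * position pointer itself back as the iterator cookie, and show() turns
 * the current position into an index into the preallocated loc_track
 * table, so no state beyond *ppos is kept between reads.
 */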
5876
5877static int slab_debug_trace_open(struct inode *inode, struct file *filep)
5878{
5880 struct kmem_cache_node *n;
5881 enum track_item alloc;
5882 int node;
5883 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
5884 sizeof(struct loc_track));
5885 struct kmem_cache *s = file_inode(filep)->i_private;
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02005886 unsigned long *obj_map;
5887
	/*
	 * __seq_open_private() returns NULL on allocation failure, and a
	 * failed ->open() is never followed by ->release(), so bail out on
	 * !t and drop the seq_file private data on the later error paths.
	 */
	if (!t)
		return -ENOMEM;

5888	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
	if (!obj_map) {
		seq_release_private(inode, filep);
		return -ENOMEM;
	}
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07005891
5892 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
5893 alloc = TRACK_ALLOC;
5894 else
5895 alloc = TRACK_FREE;
5896
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02005897 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
5898 bitmap_free(obj_map);
		seq_release_private(inode, filep);
		return -ENOMEM;
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02005900 }
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07005901
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07005902 for_each_kmem_cache_node(s, node, n) {
5903 unsigned long flags;
5904 struct page *page;
5905
5906 if (!atomic_long_read(&n->nr_slabs))
5907 continue;
5908
5909 spin_lock_irqsave(&n->list_lock, flags);
5910 list_for_each_entry(page, &n->partial, slab_list)
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02005911 process_slab(t, s, page, alloc, obj_map);
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07005912 list_for_each_entry(page, &n->full, slab_list)
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02005913 process_slab(t, s, page, alloc, obj_map);
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07005914 spin_unlock_irqrestore(&n->list_lock, flags);
5915 }
5916
Vlastimil Babkab3fd64e2021-05-23 01:28:37 +02005917 bitmap_free(obj_map);
Faiyaz Mohammed64dd6842021-06-28 19:34:55 -07005918 return 0;
5919}
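/*
 * Opening alloc_traces/free_traces does the heavy lifting up front: every
 * node's partial and full lists are walked under list_lock and
 * process_slab() folds the recorded alloc or free tracks into the
 * loc_track table that show() later prints; obj_map is scratch space used
 * to tell free from allocated objects on each slab.
 */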
5920
5921static int slab_debug_trace_release(struct inode *inode, struct file *file)
5922{
5923 struct seq_file *seq = file->private_data;
5924 struct loc_track *t = seq->private;
5925
5926 free_loc_track(t);
5927 return seq_release_private(inode, file);
5928}
5929
5930static const struct file_operations slab_debugfs_fops = {
5931 .open = slab_debug_trace_open,
5932 .read = seq_read,
5933 .llseek = seq_lseek,
5934 .release = slab_debug_trace_release,
5935};
5936
5937static void debugfs_slab_add(struct kmem_cache *s)
5938{
5939 struct dentry *slab_cache_dir;
5940
5941 if (unlikely(!slab_debugfs_root))
5942 return;
5943
5944 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
5945
5946 debugfs_create_file("alloc_traces", 0400,
5947 slab_cache_dir, s, &slab_debugfs_fops);
5948
5949 debugfs_create_file("free_traces", 0400,
5950 slab_cache_dir, s, &slab_debugfs_fops);
5951}
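/*
 * The two files land under <debugfs>/slab/<cache>/, typically
 * /sys/kernel/debug/slab/kmalloc-64/alloc_traces and .../free_traces
 * (example path; requires debugfs to be mounted and the cache to have
 * SLAB_STORE_USER set, see slab_debugfs_init() below).
 */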
5952
5953void debugfs_slab_release(struct kmem_cache *s)
5954{
	struct dentry *dentry = debugfs_lookup(s->name, slab_debugfs_root);

	/*
	 * debugfs_lookup() returns the dentry with an extra reference held;
	 * drop it with dput() once the directory has been removed.
	 */
	debugfs_remove_recursive(dentry);
	dput(dentry);
5956}
5957
5958static int __init slab_debugfs_init(void)
5959{
5960 struct kmem_cache *s;
5961
5962 slab_debugfs_root = debugfs_create_dir("slab", NULL);
5963
5964 list_for_each_entry(s, &slab_caches, list)
5965 if (s->flags & SLAB_STORE_USER)
5966 debugfs_slab_add(s);
5967
5968 return 0;
5969
5970}
5971__initcall(slab_debugfs_init);
5972#endif
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005973/*
5974 * The /proc/slabinfo ABI
5975 */
Yang Shi5b365772017-11-15 17:32:03 -08005976#ifdef CONFIG_SLUB_DEBUG
Glauber Costa0d7561c2012-10-19 18:20:27 +04005977void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005978{
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005979 unsigned long nr_slabs = 0;
Christoph Lameter205ab992008-04-14 19:11:40 +03005980 unsigned long nr_objs = 0;
5981 unsigned long nr_free = 0;
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005982 int node;
Christoph Lameterfa45dc22014-08-06 16:04:09 -07005983 struct kmem_cache_node *n;
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005984
Christoph Lameterfa45dc22014-08-06 16:04:09 -07005985 for_each_kmem_cache_node(s, node, n) {
Wanpeng Lic17fd132013-07-04 08:33:26 +08005986 nr_slabs += node_nr_slabs(n);
5987 nr_objs += node_nr_objs(n);
Christoph Lameter205ab992008-04-14 19:11:40 +03005988 nr_free += count_partial(n, count_free);
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005989 }
5990
Glauber Costa0d7561c2012-10-19 18:20:27 +04005991 sinfo->active_objs = nr_objs - nr_free;
5992 sinfo->num_objs = nr_objs;
5993 sinfo->active_slabs = nr_slabs;
5994 sinfo->num_slabs = nr_slabs;
5995 sinfo->objects_per_slab = oo_objects(s->oo);
5996 sinfo->cache_order = oo_order(s->oo);
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005997}
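/*
 * get_slabinfo() feeds /proc/slabinfo: it sums the per-node slab and
 * object counters and subtracts the free objects sitting on partial
 * slabs, so "active_objs" is an estimate of the objects currently in use.
 */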
5998
Glauber Costa0d7561c2012-10-19 18:20:27 +04005999void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04006000{
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04006001}
6002
Glauber Costab7454ad2012-10-19 18:20:25 +04006003ssize_t slabinfo_write(struct file *file, const char __user *buffer,
6004 size_t count, loff_t *ppos)
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04006005{
Glauber Costab7454ad2012-10-19 18:20:25 +04006006 return -EIO;
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04006007}
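/*
 * SLUB has no slabinfo tunables to adjust, so writes to /proc/slabinfo
 * (accepted by SLAB for limit/batchcount style tuning) are rejected with
 * -EIO, and slabinfo_show_stats() above prints nothing extra.
 */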
Yang Shi5b365772017-11-15 17:32:03 -08006008#endif /* CONFIG_SLUB_DEBUG */