/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
	};
	struct kmem_cache *slab_cache;
	void *freelist;	/* array of free object indexes */
	void *s_mem;	/* first object */
	unsigned int active;

#elif defined(CONFIG_SLUB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
#ifdef CONFIG_SLUB_CPU_PARTIAL
		struct {
			struct slab *next;
			int slabs;	/* Nr of slabs left */
		};
#endif
	};
	struct kmem_cache *slab_cache;
	/* Double-word boundary */
	void *freelist;		/* first free object */
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	unsigned int __unused;

#elif defined(CONFIG_SLOB)

	struct list_head slab_list;
	void *__unused_1;
	void *freelist;		/* first free block */
	long units;
	unsigned int __unused_2;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
SLAB_MATCH(slab_list, slab_list);
#ifndef CONFIG_SLOB
SLAB_MATCH(rcu_head, rcu_head);
SLAB_MATCH(slab_cache, slab_cache);
#endif
#ifdef CONFIG_SLAB
SLAB_MATCH(s_mem, s_mem);
SLAB_MATCH(active, active);
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may change
 * in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations where
 * we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
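
/*
 * Usage sketch (illustrative only, not a call sequence taken from the
 * allocators): code holding a head struct page known to back a slab can
 * move between the three views without open-coding casts:
 *
 *	struct slab *slab = page_slab(page);	// page must be the head page
 *	struct folio *folio = slab_folio(slab);
 *
 *	if (folio_test_slab(folio))
 *		pr_debug("cache: %s\n", slab->slab_cache->name);
 *
 * The pr_debug() line is purely illustrative; note that slab_cache is only
 * defined above for SLAB and SLUB, not for SLOB.
 */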

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
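
/*
 * Usage sketch (illustrative only): given an arbitrary kernel address that
 * may or may not belong to a slab, the helpers above compose as follows:
 *
 *	struct slab *slab = virt_to_slab(addr);
 *
 *	if (slab)
 *		pr_debug("slab at %p: node %d, %zu bytes\n",
 *			 slab_address(slab), slab_nid(slab), slab_size(slab));
 *
 * virt_to_slab() returns NULL for memory that is not backed by a slab (e.g.
 * kmalloc_large() allocations served directly by the page allocator), so
 * callers must check the result before dereferencing it.
 */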

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
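
/*
 * Usage sketch: the masks above are meant to be applied at cache creation
 * time, roughly as the common code does (ordering and error handling are
 * simplified here; see kmem_cache_create_usercopy() for the real sequence):
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)	// refuse allocator-specific flags
 *		return ERR_PTR(-EINVAL);
 *	flags &= CACHE_CREATE_MASK;		// drop flags this allocator ignores
 */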

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
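
/*
 * Usage sketch (illustrative only): a typical caller guards a debug-only
 * slow path with a single flag test, for example:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		set_track(s, object, TRACK_ALLOC, addr);
 *
 * set_track() stands in here for whatever slow-path work the caller wants
 * to skip when debugging is disabled; it is named purely for illustration.
 */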

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
						     false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup **objcgs;
	struct obj_cgroup *objcg;
	struct slab *slab;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		slab = virt_to_slab(p[i]);
		/* we could be given a kmalloc_large() object, skip those */
		if (!slab)
			continue;

		objcgs = slab_objcgs(slab);
		if (!objcgs)
			continue;

		if (!s_orig)
			s = slab->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
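
/*
 * The three hooks above implement the per-object memcg accounting protocol.
 * A rough sketch of the call order on the allocator side, assuming
 * CONFIG_MEMCG_KMEM (simplified; see slab_pre_alloc_hook() and
 * slab_post_alloc_hook() below for the real call sites):
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	if (!memcg_slab_pre_alloc_hook(s, &objcg, size, flags))
 *		return NULL;				// charge failed
 *	// ... allocate the objects into p[] ...
 *	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 *	// ... and later, on the free path ...
 *	memcg_slab_free_hook(s, p, size);
 *
 * The pre hook charges the obj_cgroup, the post hook binds each object to it
 * through the slab's objcgs vector, and the free hook uncharges and drops the
 * references again.
 */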

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					   struct kmem_cache *s, gfp_t gfp,
					   bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
#endif /* CONFIG_SLOB */

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
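
/*
 * slab_pre_alloc_hook() and slab_post_alloc_hook() bracket every allocation
 * in the SLAB/SLUB fast paths. A simplified sketch of how an allocator uses
 * them (illustrative, not the actual slab_alloc() code):
 *
 *	struct obj_cgroup *objcg = NULL;
 *	void *object;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;	// fault injection or failed memcg charge
 *	// ... take an object off the freelist into 'object' ...
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
 *	return object;
 *
 * The post hook is where KASAN tagging, optional zero-initialization,
 * kmemleak registration and memcg binding happen, in that order.
 */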

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif
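
/*
 * Usage sketch for the iterator above: summing the per-node partial counts
 * of a cache (nr_partial is the SLUB field; SLAB would walk its
 * slabs_partial list instead):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */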

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
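
/*
 * Usage sketch (simplified): the predicates above are consumed in the hot
 * paths roughly like this; the real free-path code is careful to avoid
 * overwriting allocator metadata:
 *
 *	bool init = slab_want_init_on_alloc(gfpflags, s);
 *
 *	slab_post_alloc_hook(s, objcg, gfpflags, size, p, init);
 *
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);
 */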

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#endif /* MM_SLAB_H */