/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}
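
/*
 * Example (illustrative): a cache created with SLAB_RECLAIM_ACCOUNT,
 * such as the dentry cache, has its memory counted under
 * NR_SLAB_RECLAIMABLE_B; every other cache is accounted as
 * NR_SLAB_UNRECLAIMABLE_B. The _B suffix means these vmstat counters
 * are kept in bytes, see e.g. account_slab_page() below.
 */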

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
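
/*
 * Illustrative use (a sketch, not a definition from this file): a check
 * such as "is redzoning active for this cache?" can be written as
 *
 *	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
 *		...inspect the red zone...
 *
 * The static key keeps the check off the fast path when slub_debug is
 * not enabled at all.
 */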

#ifdef CONFIG_MEMCG_KMEM
static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups are sharing the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

static inline bool page_has_obj_cgroups(struct page *page)
{
	return ((unsigned long)page->obj_cgroups & 0x1UL);
}
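
/*
 * Sketch of the encoding (shown only for illustration; the setter lives
 * outside this header): the obj_cgroups vector is stored with its lowest
 * bit set, roughly
 *
 *	page->obj_cgroups = (struct obj_cgroup **)((unsigned long)vec | 0x1UL);
 *
 * so bit 0 set means "obj_cgroups vector" and bit 0 clear means
 * "mem_cgroup pointer" in the shared field.
 */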

int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
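
/*
 * Worked example (illustrative): on a 64-bit kernel a cache with
 * s->size == 64 is charged 64 + sizeof(struct obj_cgroup *) == 72 bytes
 * per accounted object, so the obj_cgroup slot that backs the accounting
 * is paid for by the allocation it describes.
 */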

static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							   size_t objects,
							   gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (memcg_kmem_bypass())
		return NULL;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return NULL;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return NULL;
	}

	return objcg;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   int idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	if (!objcg)
		return;

	flags &= ~__GFP_ACCOUNT;
	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);

			if (!page_has_obj_cgroups(page) &&
			    memcg_alloc_page_obj_cgroups(page, s, flags)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}
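
/*
 * Note: memcg_slab_pre_alloc_hook() charged for "size" objects up front,
 * so every slot that cannot be attributed here (a NULL p[i] or a failed
 * obj_cgroups allocation) is uncharged again to keep the objcg's byte
 * counter balanced.
 */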

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
	struct obj_cgroup *objcg;
	unsigned int off;

	if (!memcg_kmem_enabled())
		return;

	if (!page_has_obj_cgroups(page))
		return;

	off = obj_to_index(s, page, p);
	objcg = page_obj_cgroups(page)[off];
	page_obj_cgroups(page)[off] = NULL;

	if (!objcg)
		return;

	obj_cgroup_uncharge(objcg, obj_full_size(s));
	mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
			-obj_full_size(s));

	obj_cgroup_put(objcg);
}

#else /* CONFIG_MEMCG_KMEM */
static inline bool page_has_obj_cgroups(struct page *page)
{
	return false;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							   size_t objects,
							   gfp_t flags)
{
	return NULL;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline void account_slab_page(struct page *page, int order,
					      struct kmem_cache *s)
{
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
						struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(page);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track
	 * user information then we can only use the space before that
	 * information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation.
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
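
/*
 * Illustrative call sequence (a sketch, not the actual SLAB/SLUB fast
 * path): allocators are expected to bracket object allocation with the
 * two hooks above, roughly as
 *
 *	struct obj_cgroup *objcg = NULL;
 *	void *obj;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;
 *	obj = ...take an object from s...;
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, &obj);
 */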

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
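
/*
 * Illustrative use (a sketch; assumes CONFIG_SLUB for the nr_partial
 * field): sum the partial slabs of every node that has a
 * kmem_cache_node allocated:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */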

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}
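
/*
 * In short (illustrative summary of the logic above): with init_on_alloc
 * enabled, allocations are pre-zeroed unless the cache has a constructor
 * (zeroing would clobber the constructed state) or is marked
 * SLAB_TYPESAFE_BY_RCU/SLAB_POISON, in which case only an explicit
 * __GFP_ZERO request is honoured. Without the static key, behaviour
 * falls back to plain __GFP_ZERO.
 */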

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#endif /* MM_SLAB_H */