/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size */
        unsigned int align;     /* Alignment as calculated */
        slab_flags_t flags;     /* Active flags on the slab */
        unsigned int useroffset;/* Usercopy region offset */
        unsigned int usersize;  /* Usercopy region size */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
                        slab_flags_t flags, unsigned int useroffset,
                        unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementations of the bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
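/*
 * Illustrative sketch (not the in-tree implementation): the generic
 * fallback simply loops over the array, e.g. freeing each element with
 * kmem_cache_free(s, p[i]); an allocator overrides these only when it
 * can batch the work more efficiently than a plain per-object loop.
 */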

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
        VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (static_branch_unlikely(&slub_debug_enabled))
                return s->flags & flags;
#endif
        return false;
}
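/*
 * Example (hypothetical caller, for illustration only): a consistency
 * check that should compile away when debugging is off could be written
 * as
 *
 *      if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *              validate_object(s, object);
 *
 * where validate_object() is a made-up helper; only flags that are part
 * of SLAB_DEBUG_FLAGS may be passed here.
 */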

#ifdef CONFIG_MEMCG_KMEM
static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
        /*
         * page->mem_cgroup and page->obj_cgroups are sharing the same
         * space. To distinguish between them in case we don't know for sure
         * that the page is a slab page (e.g. page_cgroup_ino()), let's
         * always set the lowest bit of obj_cgroups.
         */
        return (struct obj_cgroup **)
                ((unsigned long)page->obj_cgroups & ~0x1UL);
}

static inline bool page_has_obj_cgroups(struct page *page)
{
        return ((unsigned long)page->obj_cgroups & 0x1UL);
}

int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
                                 gfp_t gfp);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
        kfree(page_obj_cgroups(page));
        page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}
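/*
 * Worked example (illustrative numbers, not taken from this tree): with
 * s->size == 128 bytes and an 8-byte obj_cgroup pointer, each accounted
 * object costs 136 bytes, so a bulk allocation of 16 objects is charged
 * 16 * 136 = 2176 bytes in memcg_slab_pre_alloc_hook() below.
 */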

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        struct obj_cgroup *objcg;

        if (!memcg_kmem_enabled())
                return true;

        if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
                return true;

        objcg = get_obj_cgroup_from_current();
        if (!objcg)
                return true;

        if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
                obj_cgroup_put(objcg);
                return false;
        }

        *objcgp = objcg;
        return true;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
                                   struct pglist_data *pgdat,
                                   int idx, int nr)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        lruvec = mem_cgroup_lruvec(memcg, pgdat);
        mod_memcg_lruvec_state(lruvec, idx, nr);
        rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
        struct page *page;
        unsigned long off;
        size_t i;

        if (!memcg_kmem_enabled() || !objcg)
                return;

        flags &= ~__GFP_ACCOUNT;
        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        page = virt_to_head_page(p[i]);

                        if (!page_has_obj_cgroups(page) &&
                            memcg_alloc_page_obj_cgroups(page, s, flags)) {
                                obj_cgroup_uncharge(objcg, obj_full_size(s));
                                continue;
                        }

                        off = obj_to_index(s, page, p[i]);
                        obj_cgroup_get(objcg);
                        page_obj_cgroups(page)[off] = objcg;
                        mod_objcg_state(objcg, page_pgdat(page),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
                        obj_cgroup_uncharge(objcg, obj_full_size(s));
                }
        }
        obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
                                        void **p, int objects)
{
        struct kmem_cache *s;
        struct obj_cgroup *objcg;
        struct page *page;
        unsigned int off;
        int i;

        if (!memcg_kmem_enabled())
                return;

        for (i = 0; i < objects; i++) {
                if (unlikely(!p[i]))
                        continue;

                page = virt_to_head_page(p[i]);
                if (!page_has_obj_cgroups(page))
                        continue;

                if (!s_orig)
                        s = page->slab_cache;
                else
                        s = s_orig;

                off = obj_to_index(s, page, p[i]);
                objcg = page_obj_cgroups(page)[off];
                if (!objcg)
                        continue;

                page_obj_cgroups(page)[off] = NULL;
                obj_cgroup_uncharge(objcg, obj_full_size(s));
                mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
                                -obj_full_size(s));
                obj_cgroup_put(objcg);
        }
}

#else /* CONFIG_MEMCG_KMEM */
static inline bool page_has_obj_cgroups(struct page *page)
{
        return false;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
                                               struct kmem_cache *s, gfp_t gfp)
{
        return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
                                        void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page;

        page = virt_to_head_page(obj);
        if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
                                        __func__))
                return NULL;
        return page->slab_cache;
}

static __always_inline void account_slab_page(struct page *page, int order,
                                              struct kmem_cache *s)
{
        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
                                                struct kmem_cache *s)
{
        if (memcg_kmem_enabled())
                memcg_free_page_obj_cgroups(page);

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
}
460
Vlastimil Babkae42f1742020-08-06 23:19:05 -0700461static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
462{
463 struct kmem_cache *cachep;
464
465 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
Vlastimil Babkae42f1742020-08-06 23:19:05 -0700466 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
467 return s;
468
469 cachep = virt_to_cache(x);
Roman Gushchin10befea2020-08-06 23:21:27 -0700470 if (WARN(cachep && cachep != s,
Vlastimil Babkae42f1742020-08-06 23:19:05 -0700471 "%s: Wrong slab cache. %s but object is from %s\n",
472 __func__, s->name, cachep->name))
473 print_tracking(cachep, x);
474 return cachep;
475}
476
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
#endif
}
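/*
 * Illustrative example (made-up sizes): for a SLUB cache created with
 * object_size 100 that pads out to size 128, slab_ksize() reports 128
 * when no debug flags are set, but drops back to 100 as soon as
 * poisoning or red zoning is enabled, since the padding is then owned
 * by the debug code rather than usable by the caller.
 */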

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        fs_reclaim_acquire(flags);
        fs_reclaim_release(flags);

        might_sleep_if(gfpflags_allow_blocking(flags));

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg,
                                        gfp_t flags, size_t size, void **p)
{
        size_t i;

        flags &= gfp_allowed_mask;
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags);
                /* As p[i] might get tagged, call kmemleak hook after KASAN. */
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))
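/*
 * Typical usage (illustrative only), e.g. when summing a per-node counter:
 *
 *      struct kmem_cache_node *n;
 *      int node;
 *
 *      for_each_kmem_cache_node(s, node, n)
 *              total += n->nr_partial;
 *
 * The body runs only for nodes whose kmem_cache_node is actually allocated.
 */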

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                            gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                          unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_unlikely(&init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}
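/*
 * Summary (added for illustration): with init_on_alloc enabled, every
 * allocation is zeroed except for caches with a constructor (zeroing
 * would wipe the constructor's work) and for RCU/poisoned caches, which
 * only honour an explicit __GFP_ZERO; with the static key off, only
 * __GFP_ZERO requests are zeroed.
 */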

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_unlikely(&init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}

#endif /* MM_SLAB_H */