/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
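
/*
 * Illustrative bootstrap sequence (a sketch only; the exact transition
 * points differ per allocator and live in its kmem_cache_init()):
 *
 *	slab_state = DOWN;	  no kmalloc() available yet
 *	create_boot_cache(...);	  bootstrap caches built from static storage
 *	slab_state = PARTIAL;	  kmem_cache_node allocations work
 *	create_kmalloc_caches(0); kmalloc array populated
 *	slab_state = UP;	  kmem_cache_create() usable
 *	slab_state = FULL;	  late extras (e.g. sysfs) registered
 */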

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
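
/*
 * Example (illustrative only): with the default size classes, a 24-byte
 * request rounds up to 32 bytes, so on a typical configuration
 *
 *	kmalloc_slab(24, GFP_KERNEL)
 *
 * returns the kmalloc-32 cache, i.e. kmalloc_caches[KMALLOC_NORMAL][5].
 */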

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
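
/*
 * Sketch of how these masks are used (see kmem_cache_create_usercopy() in
 * slab_common.c): flags outside SLAB_FLAGS_PERMITTED fail the creation with
 * -EINVAL, and the surviving flags are then reduced to what the current
 * configuration supports:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		goto out_unlock;	  err = -EINVAL, creation fails
 *	flags &= CACHE_CREATE_MASK;
 */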

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case the objects listed in the array
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
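
/*
 * The generic fallbacks are (roughly) the obvious loops; a sketch of the
 * free side, modelled on the version in slab_common.c:
 *
 *	void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++)
 *			kmem_cache_free(s, p[i]);
 *	}
 */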

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
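
/*
 * Typical use (illustrative only): keep a debug-only slow path behind a
 * single static-branch check, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		set_track(s, object, TRACK_ALLOC, addr);
 */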

#ifdef CONFIG_MEMCG_KMEM
static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups are sharing the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

static inline bool page_has_obj_cgroups(struct page *page)
{
	return ((unsigned long)page->obj_cgroups & 0x1UL);
}
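
/*
 * In other words, the vector pointer is stored with its low bit set as a
 * type tag; a sketch of the encoding done on the allocation side
 * (memcg_alloc_page_obj_cgroups()):
 *
 *	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, node);
 *	page->obj_cgroups = (struct obj_cgroup **)((unsigned long)vec | 0x1UL);
 */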

int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
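
/*
 * E.g. (illustrative): for a cache with s->size == 64 on a 64-bit kernel,
 * each accounted object is charged 64 + 8 = 72 bytes.
 */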

static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							   size_t objects,
							   gfp_t flags)
{
	struct obj_cgroup *objcg;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return NULL;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return NULL;
	}

	return objcg;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   int idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	if (!objcg)
		return;

	flags &= ~__GFP_ACCOUNT;
	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);

			if (!page_has_obj_cgroups(page) &&
			    memcg_alloc_page_obj_cgroups(page, s, flags)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup *objcg;
	struct page *page;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		page = virt_to_head_page(p[i]);
		if (!page_has_obj_cgroups(page))
			continue;

		if (!s_orig)
			s = page->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, page, p[i]);
		objcg = page_obj_cgroups(page)[off];
		if (!objcg)
			continue;

		page_obj_cgroups(page)[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline bool page_has_obj_cgroups(struct page *page)
{
	return false;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							   size_t objects,
							   gfp_t flags)
{
	return NULL;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline void account_slab_page(struct page *page, int order,
					      struct kmem_cache *s)
{
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
						struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(page);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
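
/*
 * E.g. (illustrative): for a SLUB cache with SLAB_TYPESAFE_BY_RCU, the
 * freelist pointer is kept past s->inuse, so only the first s->inuse bytes
 * of an object are usable by the caller even though s->size is larger.
 */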

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
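
/*
 * The two hooks bracket the actual allocation; a sketch of the fast-path
 * call sequence in the allocators:
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, size, flags);
 *	if (!s)
 *		return NULL;
 *	... allocate the objects into p[] ...
 *	slab_post_alloc_hook(s, objcg, flags, size, p);
 */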

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
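
/*
 * Usage example (illustrative, SLUB-style):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */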

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#endif /* MM_SLAB_H */