/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB),
 * or the allocator must include definitions for all fields
 * provided here in its own definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate definitions of these fields in the kmem_cache structures
 * of SLAB and SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

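/*
 * Illustrative sketch, not part of the original header: bootstrap code
 * gates work on slab_state, and slab_is_available() (mm/slab_common.c)
 * boils down to the check below. The helper name here is hypothetical.
 */
static inline bool example_slab_is_available(void)
{
	return slab_state >= UP;	/* caches are usable from UP onwards */
}
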
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab cache corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
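/*
 * Hedged usage sketch (hypothetical helper, not part of this header): a
 * kmalloc() slow path picks the fixed-size cache via kmalloc_slab() and
 * then allocates from it, roughly as below.
 */
static inline void *example_kmalloc_from_slab(size_t size, gfp_t flags)
{
	struct kmem_cache *s = kmalloc_slab(size, flags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;
	return kmem_cache_alloc(s, flags);
}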
#endif


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementations of the bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

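/*
 * Hedged sketch of the generic fallback (hypothetical name; the real
 * __kmem_cache_free_bulk() lives in mm/slab_common.c): without a batched
 * fast path, the bulk API degenerates to a plain loop over the array.
 */
static inline void example_free_bulk_fallback(struct kmem_cache *s,
					      size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++)
		kmem_cache_free(s, p[i]);
}
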
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

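/*
 * Hedged usage sketch (hypothetical helper): walking the per-memcg
 * children of a root cache must be done under slab_mutex, e.g. to
 * shrink every clone of a cache.
 */
static inline void example_shrink_memcg_children(struct kmem_cache *root)
{
	struct kmem_cache *c;

	lockdep_assert_held(&slab_mutex);
	for_each_memcg_cache(c, root)
		__kmem_cache_shrink(c);
}
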
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We append suffixes to cache names in memcg because the system cannot
 * have two caches created with the same name. When printing them
 * locally, though, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

/*
 * Expects a pointer to a slab page. Note that a PageSlab() check alone
 * isn't sufficient, as it also returns true for tail pages of compound
 * slab pages, which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass both the PageHead()
 * and PageSlab() checks.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

/*
 * Charge the slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    (1 << order));
		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
		return 0;
	}

	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);

	/* transfer try_charge() page references to kmem_cache */
	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
	css_put_many(&memcg->css, 1 << order);
out:
	css_put(&memcg->css);
	return ret;
}

/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
		memcg_kmem_uncharge_memcg(page, order, memcg);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    1 << order);
		return 0;
	}

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
		return;
	}

	memcg_uncharge_slab(page, order, s);
}

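/*
 * Hedged sketch (hypothetical helper, not part of this header): a slab
 * page allocation path is expected to pair charge_slab_page() with
 * uncharge_slab_page(), roughly as below.
 */
static inline struct page *example_alloc_slab_pages(struct kmem_cache *s,
						    gfp_t flags, int order)
{
	struct page *page = alloc_pages(flags, order);

	if (page && charge_slab_page(page, flags, order, s)) {
		__free_pages(page, order);	/* charge failed, back out */
		page = NULL;
	}
	return page;
}
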
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * not to even do the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track
	 * user information, then we can only use the space before that
	 * information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

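/*
 * Hedged usage sketch (hypothetical helper; assumes the SLUB fields of
 * struct kmem_cache_node): a typical walk sums a per-node counter across
 * all nodes that have a kmem_cache_node allocated.
 */
#ifdef CONFIG_SLUB
static inline unsigned long example_count_partial_slabs(struct kmem_cache *s)
{
	struct kmem_cache_node *n;
	unsigned long total = 0;
	int node;

	for_each_kmem_cache_node(s, node, n)
		total += n->nr_partial;
	return total;
}
#endif
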
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

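/*
 * Hedged sketch (hypothetical helper, not part of this header): a free
 * path honouring init_on_free would wipe the object before returning it
 * to the cache, roughly as below; the real wiping happens in the
 * allocators' free hooks and also has to respect internal metadata.
 */
static inline void example_init_on_free(struct kmem_cache *c, void *obj)
{
	if (slab_want_init_on_free(c))
		memset(obj, 0, c->object_size);
}
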
#endif /* MM_SLAB_H */