#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned long size;
} kmalloc_info[];

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
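/*
 * Illustrative sketch (simplified, not the verbatim fallback) of what the
 * generic bulk alloc amounts to: allocate objects one at a time and roll
 * back on failure.
 *
 *	for (i = 0; i < nr; i++) {
 *		p[i] = kmem_cache_alloc(s, flags);
 *		if (!p[i]) {
 *			__kmem_cache_free_bulk(s, i, p);
 *			return 0;
 *		}
 *	}
 *	return i;
 */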

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
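/*
 * Illustrative use (a sketch, not lifted from the allocators): walk the
 * per-memcg children of a root cache under slab_mutex. update_child() is a
 * hypothetical helper.
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		update_child(c);
 *	mutex_unlock(&slab_mutex);
 */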

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

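/*
 * Sketch of the expected call pattern in the allocators (simplified): a slab
 * page backing a non-root cache is charged right after it is allocated and
 * uncharged just before it is freed, roughly:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_charge_slab(page, gfp, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_uncharge_slab(page, order, s);
 *	__free_pages(page, order);
 */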
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

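/*
 * cache_from_obj() is intended for the free path: given the cache the caller
 * passed in and the object being freed, return the cache the object really
 * belongs to (the per-memcg child cache when kmemcg is in use), warning when
 * the two disagree.
 */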
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * not to do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

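/*
 * The two hooks below bracket every object allocation in the allocators.
 * Simplified sketch of the expected pairing (not the actual SLAB/SLUB fast
 * path):
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = <allocate from s>;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */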
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))
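/*
 * Example (illustrative only): summing the partial slab counts of a SLUB
 * cache across all nodes.
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */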

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */