#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
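
/*
 * With CONFIG_SLUB_STATS enabled, each counter above is exposed as a
 * per-cache sysfs file. A minimal sketch of reading one from userspace
 * (the cache name and the numbers are illustrative, not real output):
 *
 *	$ cat /sys/kernel/slab/kmalloc-64/alloc_fastpath
 *	1404924 C0=709335 C1=695589
 *
 * The first number is the sum over all cpus; the C<n>= values are the
 * nonzero per-cpu counts.
 */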

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
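
/*
 * A simplified, non-authoritative sketch of how freelist and tid work
 * together on the allocation fastpath (see slab_alloc_node() in mm/slub.c
 * for the real thing): the transaction id lets the cmpxchg detect both a
 * concurrent operation and a migration to another cpu without disabling
 * preemption.
 *
 *	do {
 *		tid = this_cpu_read(s->cpu_slab->tid);
 *		c = raw_cpu_ptr(s->cpu_slab);
 *		object = c->freelist;
 *		...
 *	} while (!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
 *					  s->cpu_slab->tid,
 *					  object, tid,
 *					  next_object, next_tid(tid)));
 */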

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
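
/*
 * A sketch of the encoding (the authoritative helpers are oo_make(),
 * oo_order() and oo_objects() in mm/slub.c): the page order lives in the
 * high bits and the object count in the low OO_SHIFT bits, so both can be
 * read or swapped atomically as one word.
 *
 *	x = (order << OO_SHIFT) + objects;
 *	order = x >> OO_SHIFT;
 *	objects = x & OO_MASK;
 */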

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int object_size;	/* The size of an object without metadata */
	int offset;		/* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};
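
/*
 * A rough guide to the sizing fields above (not authoritative; the exact
 * layout depends on debug options, see calculate_sizes() in mm/slub.c):
 *
 *	object_size	payload the caller asked for
 *	inuse		offset to metadata: object_size rounded up to
 *			pointer alignment
 *	offset		where the freelist pointer is kept while the
 *			object is free (0 means it overlays the payload)
 *	size		full per-object stride, so that
 *			object_size <= inuse <= size
 */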

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
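
/*
 * cpu_partial is tunable at runtime through sysfs; a sketch (the cache
 * name is just an example):
 *
 *	# echo 0 > /sys/kernel/slab/kmalloc-64/cpu_partial
 *
 * disables per-cpu partial slabs for that cache. With
 * CONFIG_SLUB_CPU_PARTIAL=n the accessors above compile away to
 * constants, so callers need no further #ifdefs.
 */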

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
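
/*
 * Worked example (a sketch; the numbers are illustrative): with
 * cache->size == 64 and page_address(page) == 0x1000, a pointer
 * x == 0x10a4 yields object == 0x10a4 - (0xa4 % 64) == 0x1080, i.e. the
 * start of the object containing x. The comparison against last_object
 * clamps pointers past the final object, and fixup_red_left() skips over
 * the left red zone when SLAB_RED_ZONE is active.
 */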

#endif /* _LINUX_SLUB_DEF_H */