#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size; /* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
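
/*
 * For illustration only (a sketch, not a definition in this header):
 * code that must not touch the slab allocator before it is ready can
 * gate on slab_state; the slab_is_available() helper in mm/slab_common.c
 * is implemented along these lines:
 *
 *	bool slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 */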

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed in
 * the array may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
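
/*
 * A minimal sketch, for illustration: an allocator with no bulk fast
 * path can simply forward the public entry points to the generic
 * versions above (SLOB takes roughly this route):
 *
 *	int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 *				  size_t size, void **p)
 *	{
 *		return __kmem_cache_alloc_bulk(s, flags, size, p);
 *	}
 */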

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)
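
/*
 * Illustrative use (a sketch only; "apply_tuning" is a made-up helper):
 * walking the per-memcg children of a root cache under slab_mutex, e.g.
 * to apply a tunable to every child:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		apply_tuning(c);
 *	mutex_unlock(&slab_mutex);
 */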

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = __memcg_kmem_charge_memcg(page, gfp, order,
					s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	return memcg_kmem_get_cache(s, flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}
	memcg_kmem_put_cache(s);
}
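
/*
 * A rough illustration of how the two hooks above bracket an allocation
 * in the individual allocators (heavily simplified from the slab_alloc()
 * paths; the allocator-specific fast/slow path is elided):
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = ...allocator-specific allocation from s...;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */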

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
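
/*
 * Illustrative use (a sketch only; "count_partial_slabs" is a made-up
 * name, and nr_partial exists only in the SLUB variant of the struct):
 *
 *	static unsigned long count_partial_slabs(struct kmem_cache *s)
 *	{
 *		struct kmem_cache_node *n;
 *		unsigned long x = 0;
 *		int node;
 *
 *		for_each_kmem_cache_node(s, node, n)
 *			x += n->nr_partial;
 *		return x;
 *	}
 */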

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */