// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_HARDENED_USERCOPY
bool usercopy_fallback __ro_after_init =
		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
module_param(usercopy_fallback, bool, 0400);
MODULE_PARM_DESC(usercopy_fallback,
		"WARN instead of reject usercopy whitelist violations");
#endif

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
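/*
 * Illustrative example (not part of the original source): slab merging can
 * be forced off at boot via the parameters registered above, e.g.
 *
 *	linux ... slab_nomerge
 *	linux ... slub_nomerge		(alias available when CONFIG_SLUB is set)
 *
 * CONFIG_SLAB_MERGE_DEFAULT only chooses the default; either boot parameter
 * sets slab_nomerge to true unconditionally.
 */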

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);
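/*
 * Usage sketch (illustrative, not part of the original source; the cache
 * name and size below are hypothetical):
 *
 *	struct kmem_cache *cachep;
 *
 *	cachep = kmem_cache_create("example_cache", 128, 0, 0, NULL);
 *	pr_info("object size: %u\n", kmem_cache_size(cachep));
 *	// prints 128, the requested object_size, not the (possibly
 *	// padded) s->size used internally by the allocator
 */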

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}
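/*
 * Usage sketch (illustrative, not part of the original source): the generic
 * fallbacks above back the public bulk API declared in <linux/slab.h>. A
 * caller with a hypothetical "cachep" typically does:
 *
 *	void *objs[16];
 *	int got;
 *
 *	got = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!got)
 *		return -ENOMEM;	// 0 means nothing was allocated; the generic
 *				// fallback frees any partial allocation itself
 *	...
 *	kmem_cache_free_bulk(cachep, got, objs);
 */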

#ifdef CONFIG_MEMCG_KMEM

LIST_HEAD(slab_root_caches);
static DEFINE_SPINLOCK(memcg_kmem_wq_lock);

static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);

void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.root_cache = NULL;
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
	INIT_LIST_HEAD(&s->memcg_params.children);
	s->memcg_params.dying = false;
}

static int init_memcg_params(struct kmem_cache *s,
			     struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (root_cache) {
		int ret = percpu_ref_init(&s->memcg_params.refcnt,
					  kmemcg_cache_shutdown,
					  0, GFP_KERNEL);
		if (ret)
			return ret;

		s->memcg_params.root_cache = root_cache;
		INIT_LIST_HEAD(&s->memcg_params.children_node);
		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kvzalloc(sizeof(struct memcg_cache_array) +
		       memcg_nr_cache_ids * sizeof(void *),
		       GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
	else
		percpu_ref_exit(&s->memcg_params.refcnt);
}

static void free_memcg_params(struct rcu_head *rcu)
{
	struct memcg_cache_array *old;

	old = container_of(rcu, struct memcg_cache_array, rcu);
	kvfree(old);
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	new = kvzalloc(sizeof(struct memcg_cache_array) +
		       new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		call_rcu(&old->rcu, free_memcg_params);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}

void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
{
	if (is_root_cache(s)) {
		list_add(&s->root_caches_node, &slab_root_caches);
	} else {
		css_get(&memcg->css);
		s->memcg_params.memcg = memcg;
		list_add(&s->memcg_params.children_node,
			 &s->memcg_params.root_cache->memcg_params.children);
		list_add(&s->memcg_params.kmem_caches_node,
			 &s->memcg_params.memcg->kmem_caches);
	}
}

static void memcg_unlink_cache(struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		list_del(&s->root_caches_node);
	} else {
		list_del(&s->memcg_params.children_node);
		list_del(&s->memcg_params.kmem_caches_node);
		css_put(&s->memcg_params.memcg->css);
	}
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
				    struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_unlink_cache(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
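/*
 * Worked example (illustrative, not part of the original source), assuming
 * a 64-byte cache line, a 20-byte object, SLAB_HWCACHE_ALIGN and align == 0:
 *
 *	ralign = 64;			// cache_line_size()
 *	// 20 <= 32 -> ralign = 32; 20 > 16 -> loop stops
 *	align  = max(0, 32) = 32;	// then clamped to ARCH_SLAB_MINALIGN
 *					// and rounded to sizeof(void *)
 *
 * In effect the object is aligned to its size rounded up to a power of two,
 * capped at one cache line, rather than always padded to a full line.
 */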

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	if (s->usersize)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = init_memcg_params(s, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s, memcg);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
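/*
 * Usage sketch (illustrative, not part of the original source; the structure
 * and cache below are hypothetical): whitelist only one member of an object
 * for copying to/from userspace, so hardened usercopy rejects copies that
 * would touch the rest of it:
 *
 *	struct example_obj {
 *		spinlock_t lock;	// kernel-internal state, never copied
 *		char data[64];		// the only user-accessible region
 *	};
 *
 *	example_cachep = kmem_cache_create_usercopy("example_obj",
 *				sizeof(struct example_obj), 0,
 *				SLAB_HWCACHE_ALIGN,
 *				offsetof(struct example_obj, data),
 *				sizeof_field(struct example_obj, data),
 *				NULL);
 *
 * Note that a non-zero usersize also makes the cache unmergeable (see
 * slab_unmergeable() above).
 */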

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
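/*
 * Usage sketch (illustrative, not part of the original source; all names are
 * hypothetical): typical lifetime of a cache created through this wrapper,
 * which passes useroffset/usersize of 0, i.e. no usercopy whitelist region:
 *
 *	example_cachep = kmem_cache_create("example_cache",
 *					   sizeof(struct example_obj), 0,
 *					   SLAB_HWCACHE_ALIGN, NULL);
 *	if (!example_cachep)
 *		return -ENOMEM;		// NULL, not ERR_PTR(), on failure
 *
 *	obj = kmem_cache_alloc(example_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(example_cachep, obj);
 *	kmem_cache_destroy(example_cachep);	// only once all objects are freed
 */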

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
	 * through RCU and the associated kmem_cache is dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished. As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	memcg_unlink_cache(s);
	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
#endif
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been offlined while the cache
	 * creation work was pending.
	 */
	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->align,
			 root_cache->flags & CACHE_CREATE_MASK,
			 root_cache->useroffset, root_cache->usersize,
			 root_cache->ctor, memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	/*
	 * Since readers won't lock (see memcg_kmem_get_cache()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static void kmemcg_workfn(struct work_struct *work)
{
	struct kmem_cache *s = container_of(work, struct kmem_cache,
					    memcg_params.work);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	s->memcg_params.work_fn(s);
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static void kmemcg_rcufn(struct rcu_head *head)
{
	struct kmem_cache *s = container_of(head, struct kmem_cache,
					    memcg_params.rcu_head);

	/*
	 * We need to grab blocking locks. Bounce to ->work. The
	 * work item shares the space with the RCU head and can't be
	 * initialized earlier.
	 */
Roman Gushchin | 0b14e8a | 2019-07-11 20:56:06 -0700 | [diff] [blame] | 733 | INIT_WORK(&s->memcg_params.work, kmemcg_workfn); |
| 734 | queue_work(memcg_kmem_cache_wq, &s->memcg_params.work); |
Tejun Heo | 01fb58b | 2017-02-22 15:41:30 -0800 | [diff] [blame] | 735 | } |
| 736 | |
Roman Gushchin | f0a3a24 | 2019-07-11 20:56:27 -0700 | [diff] [blame^] | 737 | static void kmemcg_cache_shutdown_fn(struct kmem_cache *s) |
| 738 | { |
| 739 | WARN_ON(shutdown_cache(s)); |
| 740 | } |
| 741 | |
| 742 | static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref) |
| 743 | { |
| 744 | struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache, |
| 745 | memcg_params.refcnt); |
| 746 | unsigned long flags; |
| 747 | |
| 748 | spin_lock_irqsave(&memcg_kmem_wq_lock, flags); |
| 749 | if (s->memcg_params.root_cache->memcg_params.dying) |
| 750 | goto unlock; |
| 751 | |
| 752 | s->memcg_params.work_fn = kmemcg_cache_shutdown_fn; |
| 753 | INIT_WORK(&s->memcg_params.work, kmemcg_workfn); |
| 754 | queue_work(memcg_kmem_cache_wq, &s->memcg_params.work); |
| 755 | |
| 756 | unlock: |
| 757 | spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags); |
| 758 | } |
| 759 | |
| 760 | static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) |
| 761 | { |
| 762 | __kmemcg_cache_deactivate_after_rcu(s); |
| 763 | percpu_ref_kill(&s->memcg_params.refcnt); |
| 764 | } |
| 765 | |
Roman Gushchin | 4348669 | 2019-07-11 20:56:09 -0700 | [diff] [blame] | 766 | static void kmemcg_cache_deactivate(struct kmem_cache *s) |
Tejun Heo | 01fb58b | 2017-02-22 15:41:30 -0800 | [diff] [blame] | 767 | { |
Roman Gushchin | f0a3a24 | 2019-07-11 20:56:27 -0700 | [diff] [blame^] | 768 | if (WARN_ON_ONCE(is_root_cache(s))) |
Tejun Heo | 01fb58b | 2017-02-22 15:41:30 -0800 | [diff] [blame] | 769 | return; |
| 770 | |
Roman Gushchin | 4348669 | 2019-07-11 20:56:09 -0700 | [diff] [blame] | 771 | __kmemcg_cache_deactivate(s); |
| 772 | |
Roman Gushchin | 63b02ef | 2019-07-11 20:56:24 -0700 | [diff] [blame] | 773 | /* |
| 774 | * memcg_kmem_wq_lock is used to synchronize memcg_params.dying |
| 775 | * flag and make sure that no new kmem_cache deactivation tasks |
| 776 | * are queued (see flush_memcg_workqueue() ). |
| 777 | */ |
| 778 | spin_lock_irq(&memcg_kmem_wq_lock); |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 779 | if (s->memcg_params.root_cache->memcg_params.dying) |
Roman Gushchin | 63b02ef | 2019-07-11 20:56:24 -0700 | [diff] [blame] | 780 | goto unlock; |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 781 | |
Roman Gushchin | f0a3a24 | 2019-07-11 20:56:27 -0700 | [diff] [blame^] | 782 | s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu; |
Roman Gushchin | 0b14e8a | 2019-07-11 20:56:06 -0700 | [diff] [blame] | 783 | call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn); |
Roman Gushchin | 63b02ef | 2019-07-11 20:56:24 -0700 | [diff] [blame] | 784 | unlock: |
| 785 | spin_unlock_irq(&memcg_kmem_wq_lock); |
Tejun Heo | 01fb58b | 2017-02-22 15:41:30 -0800 | [diff] [blame] | 786 | } |
| 787 | |
Vladimir Davydov | 2a4db7e | 2015-02-12 14:59:32 -0800 | [diff] [blame] | 788 | void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) |
| 789 | { |
| 790 | int idx; |
| 791 | struct memcg_cache_array *arr; |
Vladimir Davydov | d6e0b7f | 2015-02-12 14:59:47 -0800 | [diff] [blame] | 792 | struct kmem_cache *s, *c; |
Vladimir Davydov | 2a4db7e | 2015-02-12 14:59:32 -0800 | [diff] [blame] | 793 | |
| 794 | idx = memcg_cache_id(memcg); |
| 795 | |
Vladimir Davydov | d6e0b7f | 2015-02-12 14:59:47 -0800 | [diff] [blame] | 796 | get_online_cpus(); |
| 797 | get_online_mems(); |
| 798 | |
Vladimir Davydov | 2a4db7e | 2015-02-12 14:59:32 -0800 | [diff] [blame] | 799 | mutex_lock(&slab_mutex); |
Tejun Heo | 510ded3 | 2017-02-22 15:41:24 -0800 | [diff] [blame] | 800 | list_for_each_entry(s, &slab_root_caches, root_caches_node) { |
Vladimir Davydov | 2a4db7e | 2015-02-12 14:59:32 -0800 | [diff] [blame] | 801 | arr = rcu_dereference_protected(s->memcg_params.memcg_caches, |
| 802 | lockdep_is_held(&slab_mutex)); |
Vladimir Davydov | d6e0b7f | 2015-02-12 14:59:47 -0800 | [diff] [blame] | 803 | c = arr->entries[idx]; |
| 804 | if (!c) |
| 805 | continue; |
| 806 | |
Roman Gushchin | 4348669 | 2019-07-11 20:56:09 -0700 | [diff] [blame] | 807 | kmemcg_cache_deactivate(c); |
Vladimir Davydov | 2a4db7e | 2015-02-12 14:59:32 -0800 | [diff] [blame] | 808 | arr->entries[idx] = NULL; |
| 809 | } |
| 810 | mutex_unlock(&slab_mutex); |
Vladimir Davydov | d6e0b7f | 2015-02-12 14:59:47 -0800 | [diff] [blame] | 811 | |
| 812 | put_online_mems(); |
| 813 | put_online_cpus(); |
Vladimir Davydov | 2a4db7e | 2015-02-12 14:59:32 -0800 | [diff] [blame] | 814 | } |
| 815 | |
Tejun Heo | 657dc2f | 2017-02-22 15:41:14 -0800 | [diff] [blame] | 816 | static int shutdown_memcg_caches(struct kmem_cache *s) |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 817 | { |
| 818 | struct memcg_cache_array *arr; |
| 819 | struct kmem_cache *c, *c2; |
| 820 | LIST_HEAD(busy); |
| 821 | int i; |
| 822 | |
| 823 | BUG_ON(!is_root_cache(s)); |
| 824 | |
| 825 | /* |
| 826 | * First, shutdown active caches, i.e. caches that belong to online |
| 827 | * memory cgroups. |
| 828 | */ |
| 829 | arr = rcu_dereference_protected(s->memcg_params.memcg_caches, |
| 830 | lockdep_is_held(&slab_mutex)); |
| 831 | for_each_memcg_cache_index(i) { |
| 832 | c = arr->entries[i]; |
| 833 | if (!c) |
| 834 | continue; |
Tejun Heo | 657dc2f | 2017-02-22 15:41:14 -0800 | [diff] [blame] | 835 | if (shutdown_cache(c)) |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 836 | /* |
| 837 | * The cache still has objects. Move it to a temporary |
| 838 | * list so as not to try to destroy it for a second |
| 839 | * time while iterating over inactive caches below. |
| 840 | */ |
Tejun Heo | 9eeadc8 | 2017-02-22 15:41:17 -0800 | [diff] [blame] | 841 | list_move(&c->memcg_params.children_node, &busy); |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 842 | else |
| 843 | /* |
| 844 | * The cache is empty and will be destroyed soon. Clear |
| 845 | * the pointer to it in the memcg_caches array so that |
| 846 | * it will never be accessed even if the root cache |
| 847 | * stays alive. |
| 848 | */ |
| 849 | arr->entries[i] = NULL; |
| 850 | } |
| 851 | |
| 852 | /* |
| 853 | * Second, shutdown all caches left from memory cgroups that are now |
| 854 | * offline. |
| 855 | */ |
Tejun Heo | 9eeadc8 | 2017-02-22 15:41:17 -0800 | [diff] [blame] | 856 | list_for_each_entry_safe(c, c2, &s->memcg_params.children, |
| 857 | memcg_params.children_node) |
Tejun Heo | 657dc2f | 2017-02-22 15:41:14 -0800 | [diff] [blame] | 858 | shutdown_cache(c); |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 859 | |
Tejun Heo | 9eeadc8 | 2017-02-22 15:41:17 -0800 | [diff] [blame] | 860 | list_splice(&busy, &s->memcg_params.children); |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 861 | |
| 862 | /* |
| 863 | * A cache being destroyed must be empty. In particular, this means |
| 864 | * that all per memcg caches attached to it must be empty too. |
| 865 | */ |
Tejun Heo | 9eeadc8 | 2017-02-22 15:41:17 -0800 | [diff] [blame] | 866 | if (!list_empty(&s->memcg_params.children)) |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 867 | return -EBUSY; |
| 868 | return 0; |
| 869 | } |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 870 | |
| 871 | static void flush_memcg_workqueue(struct kmem_cache *s) |
| 872 | { |
Roman Gushchin | 63b02ef | 2019-07-11 20:56:24 -0700 | [diff] [blame] | 873 | spin_lock_irq(&memcg_kmem_wq_lock); |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 874 | s->memcg_params.dying = true; |
Roman Gushchin | 63b02ef | 2019-07-11 20:56:24 -0700 | [diff] [blame] | 875 | spin_unlock_irq(&memcg_kmem_wq_lock); |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 876 | |
| 877 | /* |
Roman Gushchin | 4348669 | 2019-07-11 20:56:09 -0700 | [diff] [blame] | 878 | * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 879 | * sure all registered rcu callbacks have been invoked. |
| 880 | */ |
Roman Gushchin | 4348669 | 2019-07-11 20:56:09 -0700 | [diff] [blame] | 881 | rcu_barrier(); |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 882 | |
| 883 | /* |
| 884 | * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB |
| 885 | * deactivates the memcg kmem_caches through workqueue. Make sure all |
| 886 | * previous workitems on workqueue are processed. |
| 887 | */ |
| 888 | flush_workqueue(memcg_kmem_cache_wq); |
| 889 | } |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 890 | #else |
Tejun Heo | 657dc2f | 2017-02-22 15:41:14 -0800 | [diff] [blame] | 891 | static inline int shutdown_memcg_caches(struct kmem_cache *s) |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 892 | { |
| 893 | return 0; |
| 894 | } |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 895 | |
| 896 | static inline void flush_memcg_workqueue(struct kmem_cache *s) |
| 897 | { |
| 898 | } |
Kirill Tkhai | 84c07d1 | 2018-08-17 15:47:25 -0700 | [diff] [blame] | 899 | #endif /* CONFIG_MEMCG_KMEM */ |
Vladimir Davydov | 794b124 | 2014-04-07 15:39:26 -0700 | [diff] [blame] | 900 | |
Christoph Lameter | 41a2128 | 2014-05-06 12:50:08 -0700 | [diff] [blame] | 901 | void slab_kmem_cache_release(struct kmem_cache *s) |
| 902 | { |
Dmitry Safonov | 52b4b95 | 2016-02-17 13:11:37 -0800 | [diff] [blame] | 903 | __kmem_cache_release(s); |
Vladimir Davydov | f7ce319 | 2015-02-12 14:59:20 -0800 | [diff] [blame] | 904 | destroy_memcg_params(s); |
Andrzej Hajda | 3dec16e | 2015-02-13 14:36:38 -0800 | [diff] [blame] | 905 | kfree_const(s->name); |
Christoph Lameter | 41a2128 | 2014-05-06 12:50:08 -0700 | [diff] [blame] | 906 | kmem_cache_free(kmem_cache, s); |
| 907 | } |
| 908 | |
Christoph Lameter | 945cf2b | 2012-09-04 23:18:33 +0000 | [diff] [blame] | 909 | void kmem_cache_destroy(struct kmem_cache *s) |
| 910 | { |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 911 | int err; |
Vladimir Davydov | d5b3cf7 | 2015-02-10 14:11:47 -0800 | [diff] [blame] | 912 | |
Sergey Senozhatsky | 3942d29 | 2015-09-08 15:00:50 -0700 | [diff] [blame] | 913 | if (unlikely(!s)) |
| 914 | return; |
| 915 | |
Shakeel Butt | 92ee383 | 2018-06-14 15:26:27 -0700 | [diff] [blame] | 916 | flush_memcg_workqueue(s); |
| 917 | |
Christoph Lameter | 945cf2b | 2012-09-04 23:18:33 +0000 | [diff] [blame] | 918 | get_online_cpus(); |
Vladimir Davydov | 03afc0e | 2014-06-04 16:07:20 -0700 | [diff] [blame] | 919 | get_online_mems(); |
| 920 | |
Christoph Lameter | 945cf2b | 2012-09-04 23:18:33 +0000 | [diff] [blame] | 921 | mutex_lock(&slab_mutex); |
Vladimir Davydov | b852990 | 2014-04-07 15:39:28 -0700 | [diff] [blame] | 922 | |
Christoph Lameter | 945cf2b | 2012-09-04 23:18:33 +0000 | [diff] [blame] | 923 | s->refcount--; |
Vladimir Davydov | b852990 | 2014-04-07 15:39:28 -0700 | [diff] [blame] | 924 | if (s->refcount) |
| 925 | goto out_unlock; |
Christoph Lameter | 945cf2b | 2012-09-04 23:18:33 +0000 | [diff] [blame] | 926 | |
Tejun Heo | 657dc2f | 2017-02-22 15:41:14 -0800 | [diff] [blame] | 927 | err = shutdown_memcg_caches(s); |
Vladimir Davydov | d60fdcc | 2015-11-05 18:45:11 -0800 | [diff] [blame] | 928 | if (!err) |
Tejun Heo | 657dc2f | 2017-02-22 15:41:14 -0800 | [diff] [blame] | 929 | err = shutdown_cache(s); |
Vladimir Davydov | b852990 | 2014-04-07 15:39:28 -0700 | [diff] [blame] | 930 | |
Vladimir Davydov | cd918c5 | 2015-11-05 18:45:14 -0800 | [diff] [blame] | 931 | if (err) { |
Joe Perches | 756a025 | 2016-03-17 14:19:47 -0700 | [diff] [blame] | 932 | pr_err("kmem_cache_destroy %s: Slab cache still has objects\n", |
| 933 | s->name); |
Vladimir Davydov | cd918c5 | 2015-11-05 18:45:14 -0800 | [diff] [blame] | 934 | dump_stack(); |
| 935 | } |
Vladimir Davydov | b852990 | 2014-04-07 15:39:28 -0700 | [diff] [blame] | 936 | out_unlock: |
| 937 | mutex_unlock(&slab_mutex); |
Vladimir Davydov | d5b3cf7 | 2015-02-10 14:11:47 -0800 | [diff] [blame] | 938 | |
Vladimir Davydov | 03afc0e | 2014-06-04 16:07:20 -0700 | [diff] [blame] | 939 | put_online_mems(); |
Christoph Lameter | 945cf2b | 2012-09-04 23:18:33 +0000 | [diff] [blame] | 940 | put_online_cpus(); |
| 941 | } |
| 942 | EXPORT_SYMBOL(kmem_cache_destroy); |
| 943 | |
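/*
 * Illustrative sketch (not part of this file): the usual lifecycle of a
 * hypothetical "foo" cache in a driver. kmem_cache_destroy() should only
 * run once every object has been returned to the cache; otherwise the
 * pr_err() above fires and the cache is left in place.
 */
struct foo { int a; int b; };
static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cache);	/* NULL-safe, see the early return above */
}
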
Vladimir Davydov | 03afc0e | 2014-06-04 16:07:20 -0700 | [diff] [blame] | 944 | /** |
| 945 | * kmem_cache_shrink - Shrink a cache. |
| 946 | * @cachep: The cache to shrink. |
| 947 | * |
| 948 | * Releases as many slabs as possible for a cache. |
| 949 | * To help debugging, a zero exit status indicates all slabs were released. |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 950 | * |
| 951 | * Return: %0 if all slabs were released, non-zero otherwise |
Vladimir Davydov | 03afc0e | 2014-06-04 16:07:20 -0700 | [diff] [blame] | 952 | */ |
| 953 | int kmem_cache_shrink(struct kmem_cache *cachep) |
| 954 | { |
| 955 | int ret; |
| 956 | |
| 957 | get_online_cpus(); |
| 958 | get_online_mems(); |
Alexander Potapenko | 55834c5 | 2016-05-20 16:59:11 -0700 | [diff] [blame] | 959 | kasan_cache_shrink(cachep); |
Tejun Heo | c9fc586 | 2017-02-22 15:41:27 -0800 | [diff] [blame] | 960 | ret = __kmem_cache_shrink(cachep); |
Vladimir Davydov | 03afc0e | 2014-06-04 16:07:20 -0700 | [diff] [blame] | 961 | put_online_mems(); |
| 962 | put_online_cpus(); |
| 963 | return ret; |
| 964 | } |
| 965 | EXPORT_SYMBOL(kmem_cache_shrink); |
| 966 | |
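/*
 * Illustrative use (assumed caller code): after a burst of frees back into
 * the hypothetical foo_cache above, empty slabs can be handed back to the
 * page allocator.
 */
if (kmem_cache_shrink(foo_cache))
	pr_debug("foo_cache still holds allocated objects\n");
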
Denis Kirjanov | fda9012 | 2015-11-05 18:44:59 -0800 | [diff] [blame] | 967 | bool slab_is_available(void) |
Christoph Lameter | 97d0660 | 2012-07-06 15:25:11 -0500 | [diff] [blame] | 968 | { |
| 969 | return slab_state >= UP; |
| 970 | } |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 971 | |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 972 | #ifndef CONFIG_SLOB |
| 973 | /* Create a cache during boot when no slab services are available yet */ |
Alexey Dobriyan | 361d575 | 2018-04-05 16:20:33 -0700 | [diff] [blame] | 974 | void __init create_boot_cache(struct kmem_cache *s, const char *name, |
| 975 | unsigned int size, slab_flags_t flags, |
| 976 | unsigned int useroffset, unsigned int usersize) |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 977 | { |
| 978 | int err; |
| 979 | |
| 980 | s->name = name; |
| 981 | s->size = s->object_size = size; |
Christoph Lameter | 4590685 | 2012-11-28 16:23:16 +0000 | [diff] [blame] | 982 | s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size); |
David Windsor | 8eb8284 | 2017-06-10 22:50:28 -0400 | [diff] [blame] | 983 | s->useroffset = useroffset; |
| 984 | s->usersize = usersize; |
Vladimir Davydov | f7ce319 | 2015-02-12 14:59:20 -0800 | [diff] [blame] | 985 | |
| 986 | slab_init_memcg_params(s); |
| 987 | |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 988 | err = __kmem_cache_create(s, flags); |
| 989 | |
| 990 | if (err) |
Alexey Dobriyan | 361d575 | 2018-04-05 16:20:33 -0700 | [diff] [blame] | 991 | panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n", |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 992 | name, size, err); |
| 993 | |
| 994 | s->refcount = -1; /* Exempt from merging for now */ |
| 995 | } |
| 996 | |
Alexey Dobriyan | 55de8b9 | 2018-04-05 16:20:29 -0700 | [diff] [blame] | 997 | struct kmem_cache *__init create_kmalloc_cache(const char *name, |
| 998 | unsigned int size, slab_flags_t flags, |
| 999 | unsigned int useroffset, unsigned int usersize) |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 1000 | { |
| 1001 | struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); |
| 1002 | |
| 1003 | if (!s) |
| 1004 | panic("Out of memory when creating slab %s\n", name); |
| 1005 | |
David Windsor | 6c0c21a | 2017-06-10 22:50:47 -0400 | [diff] [blame] | 1006 | create_boot_cache(s, name, size, flags, useroffset, usersize); |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 1007 | list_add(&s->list, &slab_caches); |
Roman Gushchin | c03914b | 2019-07-11 20:56:02 -0700 | [diff] [blame] | 1008 | memcg_link_cache(s, NULL); |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 1009 | s->refcount = 1; |
| 1010 | return s; |
| 1011 | } |
| 1012 | |
Vlastimil Babka | cc252ea | 2018-10-26 15:05:34 -0700 | [diff] [blame] | 1013 | struct kmem_cache * |
| 1014 | kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init; |
Christoph Lameter | 9425c58 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1015 | EXPORT_SYMBOL(kmalloc_caches); |
| 1016 | |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1017 | /* |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1018 | * Conversion table from small slab sizes (divided by 8) to the index in the |
 | 1019 | * kmalloc array. This is necessary for slabs < 192 since we have non-power- |
 | 1020 | * of-two cache sizes there. The size of larger slabs can be determined using |
 | 1021 | * fls. |
| 1022 | */ |
Alexey Dobriyan | d5f8665 | 2018-04-05 16:20:40 -0700 | [diff] [blame] | 1023 | static u8 size_index[24] __ro_after_init = { |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1024 | 3, /* 8 */ |
| 1025 | 4, /* 16 */ |
| 1026 | 5, /* 24 */ |
| 1027 | 5, /* 32 */ |
| 1028 | 6, /* 40 */ |
| 1029 | 6, /* 48 */ |
| 1030 | 6, /* 56 */ |
| 1031 | 6, /* 64 */ |
| 1032 | 1, /* 72 */ |
| 1033 | 1, /* 80 */ |
| 1034 | 1, /* 88 */ |
| 1035 | 1, /* 96 */ |
| 1036 | 7, /* 104 */ |
| 1037 | 7, /* 112 */ |
| 1038 | 7, /* 120 */ |
| 1039 | 7, /* 128 */ |
| 1040 | 2, /* 136 */ |
| 1041 | 2, /* 144 */ |
| 1042 | 2, /* 152 */ |
| 1043 | 2, /* 160 */ |
| 1044 | 2, /* 168 */ |
| 1045 | 2, /* 176 */ |
| 1046 | 2, /* 184 */ |
| 1047 | 2 /* 192 */ |
| 1048 | }; |
| 1049 | |
Alexey Dobriyan | ac914d0 | 2018-04-05 16:20:44 -0700 | [diff] [blame] | 1050 | static inline unsigned int size_index_elem(unsigned int bytes) |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1051 | { |
| 1052 | return (bytes - 1) / 8; |
| 1053 | } |
| 1054 | |
| 1055 | /* |
| 1056 | * Find the kmem_cache structure that serves a given size of |
 | 1057 | * allocation. |
| 1058 | */ |
| 1059 | struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) |
| 1060 | { |
Alexey Dobriyan | d5f8665 | 2018-04-05 16:20:40 -0700 | [diff] [blame] | 1061 | unsigned int index; |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1062 | |
| 1063 | if (size <= 192) { |
| 1064 | if (!size) |
| 1065 | return ZERO_SIZE_PTR; |
| 1066 | |
| 1067 | index = size_index[size_index_elem(size)]; |
Dmitry Vyukov | 6144847 | 2018-10-26 15:03:12 -0700 | [diff] [blame] | 1068 | } else { |
Yangtao Li | 221d7da | 2018-12-28 00:33:01 -0800 | [diff] [blame] | 1069 | if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE)) |
Dmitry Vyukov | 6144847 | 2018-10-26 15:03:12 -0700 | [diff] [blame] | 1070 | return NULL; |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1071 | index = fls(size - 1); |
Dmitry Vyukov | 6144847 | 2018-10-26 15:03:12 -0700 | [diff] [blame] | 1072 | } |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1073 | |
Vlastimil Babka | cc252ea | 2018-10-26 15:05:34 -0700 | [diff] [blame] | 1074 | return kmalloc_caches[kmalloc_type(flags)][index]; |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1075 | } |
| 1076 | |
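/*
 * Worked examples for the lookup above (illustrative, assuming the default
 * size_index table with KMALLOC_MIN_SIZE == 8; the row of kmalloc_caches[]
 * is picked by kmalloc_type(flags)):
 *
 *   kmalloc_slab(30, GFP_KERNEL):  size_index[(30 - 1) / 8] == size_index[3] == 5   -> kmalloc-32
 *   kmalloc_slab(100, GFP_KERNEL): size_index[(100 - 1) / 8] == size_index[12] == 7 -> kmalloc-128
 *   kmalloc_slab(300, GFP_KERNEL): 300 > 192, so fls(299) == 9                      -> kmalloc-512
 */
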
| 1077 | /* |
Gavin Guo | 4066c33 | 2015-06-24 16:55:54 -0700 | [diff] [blame] | 1078 | * kmalloc_info[] is needed to make the slub_debug=,kmalloc-xx option work at |
 | 1079 | * boot time. kmalloc_index() supports up to 2^26=64MB, so the final entry of |
 | 1080 | * the table is kmalloc-67108864. |
| 1081 | */ |
Vlastimil Babka | af3b5f8 | 2017-02-22 15:41:05 -0800 | [diff] [blame] | 1082 | const struct kmalloc_info_struct kmalloc_info[] __initconst = { |
Gavin Guo | 4066c33 | 2015-06-24 16:55:54 -0700 | [diff] [blame] | 1083 | {NULL, 0}, {"kmalloc-96", 96}, |
| 1084 | {"kmalloc-192", 192}, {"kmalloc-8", 8}, |
| 1085 | {"kmalloc-16", 16}, {"kmalloc-32", 32}, |
| 1086 | {"kmalloc-64", 64}, {"kmalloc-128", 128}, |
| 1087 | {"kmalloc-256", 256}, {"kmalloc-512", 512}, |
Vlastimil Babka | f0d7787 | 2018-10-26 15:05:55 -0700 | [diff] [blame] | 1088 | {"kmalloc-1k", 1024}, {"kmalloc-2k", 2048}, |
| 1089 | {"kmalloc-4k", 4096}, {"kmalloc-8k", 8192}, |
| 1090 | {"kmalloc-16k", 16384}, {"kmalloc-32k", 32768}, |
| 1091 | {"kmalloc-64k", 65536}, {"kmalloc-128k", 131072}, |
| 1092 | {"kmalloc-256k", 262144}, {"kmalloc-512k", 524288}, |
| 1093 | {"kmalloc-1M", 1048576}, {"kmalloc-2M", 2097152}, |
| 1094 | {"kmalloc-4M", 4194304}, {"kmalloc-8M", 8388608}, |
| 1095 | {"kmalloc-16M", 16777216}, {"kmalloc-32M", 33554432}, |
| 1096 | {"kmalloc-64M", 67108864} |
Gavin Guo | 4066c33 | 2015-06-24 16:55:54 -0700 | [diff] [blame] | 1097 | }; |
| 1098 | |
| 1099 | /* |
Daniel Sanders | 34cc699 | 2015-06-24 16:55:57 -0700 | [diff] [blame] | 1100 | * Patch up the size_index table if we have unusually large alignment |
 | 1101 | * requirements for the kmalloc array. This seems to be the case only |
 | 1102 | * for MIPS. The standard arches will not generate any code here. |
 | 1103 | * |
 | 1104 | * The largest permitted alignment is 256 bytes due to the way we |
 | 1105 | * handle the index determination for the smaller caches. |
 | 1106 | * |
 | 1107 | * Make sure that nothing crazy happens if someone starts tinkering |
 | 1108 | * around with ARCH_KMALLOC_MINALIGN. |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1109 | */ |
Daniel Sanders | 34cc699 | 2015-06-24 16:55:57 -0700 | [diff] [blame] | 1110 | void __init setup_kmalloc_cache_index_table(void) |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1111 | { |
Alexey Dobriyan | ac914d0 | 2018-04-05 16:20:44 -0700 | [diff] [blame] | 1112 | unsigned int i; |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1113 | |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1114 | BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || |
| 1115 | (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); |
| 1116 | |
| 1117 | for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) { |
Alexey Dobriyan | ac914d0 | 2018-04-05 16:20:44 -0700 | [diff] [blame] | 1118 | unsigned int elem = size_index_elem(i); |
Christoph Lameter | 2c59dd6 | 2013-01-10 19:14:19 +0000 | [diff] [blame] | 1119 | |
| 1120 | if (elem >= ARRAY_SIZE(size_index)) |
| 1121 | break; |
| 1122 | size_index[elem] = KMALLOC_SHIFT_LOW; |
| 1123 | } |
| 1124 | |
| 1125 | if (KMALLOC_MIN_SIZE >= 64) { |
| 1126 | /* |
 | 1127 | * The 96 byte sized cache is not used if the alignment |
 | 1128 | * is 64 bytes. |
| 1129 | */ |
| 1130 | for (i = 64 + 8; i <= 96; i += 8) |
| 1131 | size_index[size_index_elem(i)] = 7; |
| 1132 | |
| 1133 | } |
| 1134 | |
| 1135 | if (KMALLOC_MIN_SIZE >= 128) { |
| 1136 | /* |
| 1137 | * The 192 byte sized cache is not used if the alignment |
 | 1138 | * is 128 bytes. Redirect kmalloc to use the 256 byte cache |
| 1139 | * instead. |
| 1140 | */ |
| 1141 | for (i = 128 + 8; i <= 192; i += 8) |
| 1142 | size_index[size_index_elem(i)] = 8; |
| 1143 | } |
Daniel Sanders | 34cc699 | 2015-06-24 16:55:57 -0700 | [diff] [blame] | 1144 | } |
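/*
 * Illustrative effect of the patching above (assumed configuration): with
 * KMALLOC_MIN_SIZE == 64 (and thus KMALLOC_SHIFT_LOW == 6), requests of
 * 8..64 bytes all map to kmalloc-64, and 72..96 bytes fall through to
 * index 7 (kmalloc-128) because the 96 byte cache is never created.
 */
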
| 1145 | |
Vlastimil Babka | f0d7787 | 2018-10-26 15:05:55 -0700 | [diff] [blame] | 1146 | static const char * |
| 1147 | kmalloc_cache_name(const char *prefix, unsigned int size) |
| 1148 | { |
| 1149 | |
| 1150 | static const char units[3] = "\0kM"; |
| 1151 | int idx = 0; |
| 1152 | |
| 1153 | while (size >= 1024 && (size % 1024 == 0)) { |
| 1154 | size /= 1024; |
| 1155 | idx++; |
| 1156 | } |
| 1157 | |
| 1158 | return kasprintf(GFP_NOWAIT, "%s-%u%c", prefix, size, units[idx]); |
| 1159 | } |
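/*
 * Example names produced by the helper above (sizes chosen for
 * illustration):
 *
 *   kmalloc_cache_name("kmalloc-rcl", 192)     -> "kmalloc-rcl-192"
 *   kmalloc_cache_name("kmalloc-rcl", 65536)   -> "kmalloc-rcl-64k"
 *   kmalloc_cache_name("dma-kmalloc", 2097152) -> "dma-kmalloc-2M"
 */
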
| 1160 | |
Vlastimil Babka | 1291523 | 2018-10-26 15:05:38 -0700 | [diff] [blame] | 1161 | static void __init |
| 1162 | new_kmalloc_cache(int idx, int type, slab_flags_t flags) |
Christoph Lameter | a9730fc | 2015-06-29 09:28:08 -0500 | [diff] [blame] | 1163 | { |
Vlastimil Babka | 1291523 | 2018-10-26 15:05:38 -0700 | [diff] [blame] | 1164 | const char *name; |
| 1165 | |
| 1166 | if (type == KMALLOC_RECLAIM) { |
| 1167 | flags |= SLAB_RECLAIM_ACCOUNT; |
Vlastimil Babka | f0d7787 | 2018-10-26 15:05:55 -0700 | [diff] [blame] | 1168 | name = kmalloc_cache_name("kmalloc-rcl", |
Vlastimil Babka | 1291523 | 2018-10-26 15:05:38 -0700 | [diff] [blame] | 1169 | kmalloc_info[idx].size); |
| 1170 | BUG_ON(!name); |
| 1171 | } else { |
| 1172 | name = kmalloc_info[idx].name; |
| 1173 | } |
| 1174 | |
| 1175 | kmalloc_caches[type][idx] = create_kmalloc_cache(name, |
David Windsor | 6c0c21a | 2017-06-10 22:50:47 -0400 | [diff] [blame] | 1176 | kmalloc_info[idx].size, flags, 0, |
| 1177 | kmalloc_info[idx].size); |
Christoph Lameter | a9730fc | 2015-06-29 09:28:08 -0500 | [diff] [blame] | 1178 | } |
| 1179 | |
Daniel Sanders | 34cc699 | 2015-06-24 16:55:57 -0700 | [diff] [blame] | 1180 | /* |
 | 1181 | * Create the kmalloc array. Some of the regular kmalloc caches |
 | 1182 | * may already have been created because they were needed to |
| 1183 | * enable allocations for slab creation. |
| 1184 | */ |
Alexey Dobriyan | d50112e | 2017-11-15 17:32:18 -0800 | [diff] [blame] | 1185 | void __init create_kmalloc_caches(slab_flags_t flags) |
Daniel Sanders | 34cc699 | 2015-06-24 16:55:57 -0700 | [diff] [blame] | 1186 | { |
Vlastimil Babka | 1291523 | 2018-10-26 15:05:38 -0700 | [diff] [blame] | 1187 | int i, type; |
Daniel Sanders | 34cc699 | 2015-06-24 16:55:57 -0700 | [diff] [blame] | 1188 | |
Vlastimil Babka | 1291523 | 2018-10-26 15:05:38 -0700 | [diff] [blame] | 1189 | for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) { |
| 1190 | for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { |
| 1191 | if (!kmalloc_caches[type][i]) |
| 1192 | new_kmalloc_cache(i, type, flags); |
Chris Mason | 956e46e | 2013-05-08 15:56:28 -0400 | [diff] [blame] | 1193 | |
Vlastimil Babka | 1291523 | 2018-10-26 15:05:38 -0700 | [diff] [blame] | 1194 | /* |
 | 1195 | * Caches that are not of a power-of-two size have to |
 | 1196 | * be created immediately after the earlier power-of-two |
 | 1197 | * caches. |
| 1198 | */ |
| 1199 | if (KMALLOC_MIN_SIZE <= 32 && i == 6 && |
| 1200 | !kmalloc_caches[type][1]) |
| 1201 | new_kmalloc_cache(1, type, flags); |
| 1202 | if (KMALLOC_MIN_SIZE <= 64 && i == 7 && |
| 1203 | !kmalloc_caches[type][2]) |
| 1204 | new_kmalloc_cache(2, type, flags); |
| 1205 | } |
Christoph Lameter | 8a965b3 | 2013-05-03 18:04:18 +0000 | [diff] [blame] | 1206 | } |
| 1207 | |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1208 | /* Kmalloc array is now usable */ |
| 1209 | slab_state = UP; |
| 1210 | |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1211 | #ifdef CONFIG_ZONE_DMA |
| 1212 | for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) { |
Vlastimil Babka | cc252ea | 2018-10-26 15:05:34 -0700 | [diff] [blame] | 1213 | struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i]; |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1214 | |
| 1215 | if (s) { |
Alexey Dobriyan | 0be7032 | 2018-04-05 16:20:26 -0700 | [diff] [blame] | 1216 | unsigned int size = kmalloc_size(i); |
Vlastimil Babka | f0d7787 | 2018-10-26 15:05:55 -0700 | [diff] [blame] | 1217 | const char *n = kmalloc_cache_name("dma-kmalloc", size); |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1218 | |
| 1219 | BUG_ON(!n); |
Vlastimil Babka | cc252ea | 2018-10-26 15:05:34 -0700 | [diff] [blame] | 1220 | kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache( |
| 1221 | n, size, SLAB_CACHE_DMA | flags, 0, 0); |
Christoph Lameter | f97d5f6 | 2013-01-10 19:12:17 +0000 | [diff] [blame] | 1222 | } |
| 1223 | } |
| 1224 | #endif |
| 1225 | } |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 1226 | #endif /* !CONFIG_SLOB */ |
| 1227 | |
Vladimir Davydov | cea371f | 2014-06-04 16:07:04 -0700 | [diff] [blame] | 1228 | /* |
| 1229 | * To avoid unnecessary overhead, we pass through large allocation requests |
| 1230 | * directly to the page allocator. We use __GFP_COMP, because we will need to |
| 1231 | * know the allocation order to free the pages properly in kfree. |
| 1232 | */ |
Vladimir Davydov | 5238343 | 2014-06-04 16:06:39 -0700 | [diff] [blame] | 1233 | void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) |
| 1234 | { |
| 1235 | void *ret; |
| 1236 | struct page *page; |
| 1237 | |
| 1238 | flags |= __GFP_COMP; |
Vladimir Davydov | 4949148 | 2016-07-26 15:24:24 -0700 | [diff] [blame] | 1239 | page = alloc_pages(flags, order); |
Vladimir Davydov | 5238343 | 2014-06-04 16:06:39 -0700 | [diff] [blame] | 1240 | ret = page ? page_address(page) : NULL; |
Andrey Konovalov | 0116523 | 2018-12-28 00:29:37 -0800 | [diff] [blame] | 1241 | ret = kasan_kmalloc_large(ret, size, flags); |
Andrey Konovalov | a2f7757 | 2019-02-20 22:19:16 -0800 | [diff] [blame] | 1242 | /* As ret might get tagged, call kmemleak hook after KASAN. */ |
Andrey Konovalov | 5312824 | 2019-02-20 22:19:11 -0800 | [diff] [blame] | 1243 | kmemleak_alloc(ret, size, 1, flags); |
Vladimir Davydov | 5238343 | 2014-06-04 16:06:39 -0700 | [diff] [blame] | 1244 | return ret; |
| 1245 | } |
| 1246 | EXPORT_SYMBOL(kmalloc_order); |
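/*
 * Illustrative caller path (assumed, simplified): a kmalloc() request larger
 * than KMALLOC_MAX_CACHE_SIZE bypasses the kmalloc caches and reaches this
 * function via kmalloc_large(), which derives the order from the size.
 */
void *buf = kmalloc(64 * 1024, GFP_KERNEL);	/* order-4 pages on SLUB with 4K PAGE_SIZE */
/* ... use buf ... */
kfree(buf);	/* kfree() detects the compound page and frees the whole order */
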
| 1247 | |
Christoph Lameter | f1b6eb6 | 2013-09-04 16:35:34 +0000 | [diff] [blame] | 1248 | #ifdef CONFIG_TRACING |
| 1249 | void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) |
| 1250 | { |
| 1251 | void *ret = kmalloc_order(size, flags, order); |
| 1252 | trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); |
| 1253 | return ret; |
| 1254 | } |
| 1255 | EXPORT_SYMBOL(kmalloc_order_trace); |
| 1256 | #endif |
Christoph Lameter | 45530c4 | 2012-11-28 16:23:07 +0000 | [diff] [blame] | 1257 | |
Thomas Garnier | 7c00fce | 2016-07-26 15:21:56 -0700 | [diff] [blame] | 1258 | #ifdef CONFIG_SLAB_FREELIST_RANDOM |
| 1259 | /* Randomize a generic freelist */ |
| 1260 | static void freelist_randomize(struct rnd_state *state, unsigned int *list, |
Alexey Dobriyan | 302d55d | 2018-04-05 16:21:46 -0700 | [diff] [blame] | 1261 | unsigned int count) |
Thomas Garnier | 7c00fce | 2016-07-26 15:21:56 -0700 | [diff] [blame] | 1262 | { |
Thomas Garnier | 7c00fce | 2016-07-26 15:21:56 -0700 | [diff] [blame] | 1263 | unsigned int rand; |
Alexey Dobriyan | 302d55d | 2018-04-05 16:21:46 -0700 | [diff] [blame] | 1264 | unsigned int i; |
Thomas Garnier | 7c00fce | 2016-07-26 15:21:56 -0700 | [diff] [blame] | 1265 | |
| 1266 | for (i = 0; i < count; i++) |
| 1267 | list[i] = i; |
| 1268 | |
| 1269 | /* Fisher-Yates shuffle */ |
| 1270 | for (i = count - 1; i > 0; i--) { |
| 1271 | rand = prandom_u32_state(state); |
| 1272 | rand %= (i + 1); |
| 1273 | swap(list[i], list[rand]); |
| 1274 | } |
| 1275 | } |
| 1276 | |
| 1277 | /* Create a random sequence per cache */ |
| 1278 | int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, |
| 1279 | gfp_t gfp) |
| 1280 | { |
| 1281 | struct rnd_state state; |
| 1282 | |
| 1283 | if (count < 2 || cachep->random_seq) |
| 1284 | return 0; |
| 1285 | |
| 1286 | cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp); |
| 1287 | if (!cachep->random_seq) |
| 1288 | return -ENOMEM; |
| 1289 | |
| 1290 | /* Get best entropy at this stage of boot */ |
| 1291 | prandom_seed_state(&state, get_random_long()); |
| 1292 | |
| 1293 | freelist_randomize(&state, cachep->random_seq, count); |
| 1294 | return 0; |
| 1295 | } |
| 1296 | |
| 1297 | /* Destroy the per-cache random freelist sequence */ |
| 1298 | void cache_random_seq_destroy(struct kmem_cache *cachep) |
| 1299 | { |
| 1300 | kfree(cachep->random_seq); |
| 1301 | cachep->random_seq = NULL; |
| 1302 | } |
| 1303 | #endif /* CONFIG_SLAB_FREELIST_RANDOM */ |
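/*
 * Illustrative outcome (assumed values): for a cache packing 8 objects per
 * slab, cache_random_seq_create(cachep, 8, GFP_KERNEL) might store
 *
 *   cachep->random_seq == { 5, 0, 7, 2, 6, 1, 4, 3 }
 *
 * i.e. one Fisher-Yates permutation of 0..7, which the allocator then uses
 * to lay out each new slab's freelist instead of the sequential order.
 */
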
| 1304 | |
Yang Shi | 5b36577 | 2017-11-15 17:32:03 -0800 | [diff] [blame] | 1305 | #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) |
Wanpeng Li | e9b4db2 | 2013-07-04 08:33:24 +0800 | [diff] [blame] | 1306 | #ifdef CONFIG_SLAB |
Joe Perches | 0825a6f | 2018-06-14 15:27:58 -0700 | [diff] [blame] | 1307 | #define SLABINFO_RIGHTS (0600) |
Wanpeng Li | e9b4db2 | 2013-07-04 08:33:24 +0800 | [diff] [blame] | 1308 | #else |
Joe Perches | 0825a6f | 2018-06-14 15:27:58 -0700 | [diff] [blame] | 1309 | #define SLABINFO_RIGHTS (0400) |
Wanpeng Li | e9b4db2 | 2013-07-04 08:33:24 +0800 | [diff] [blame] | 1310 | #endif |
| 1311 | |
Vladimir Davydov | b047501 | 2014-12-10 15:44:19 -0800 | [diff] [blame] | 1312 | static void print_slabinfo_header(struct seq_file *m) |
Glauber Costa | bcee6e2 | 2012-10-19 18:20:26 +0400 | [diff] [blame] | 1313 | { |
| 1314 | /* |
| 1315 | * Output format version, so at least we can change it |
| 1316 | * without _too_ many complaints. |
| 1317 | */ |
| 1318 | #ifdef CONFIG_DEBUG_SLAB |
| 1319 | seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); |
| 1320 | #else |
| 1321 | seq_puts(m, "slabinfo - version: 2.1\n"); |
| 1322 | #endif |
Joe Perches | 756a025 | 2016-03-17 14:19:47 -0700 | [diff] [blame] | 1323 | seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>"); |
Glauber Costa | bcee6e2 | 2012-10-19 18:20:26 +0400 | [diff] [blame] | 1324 | seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); |
| 1325 | seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); |
| 1326 | #ifdef CONFIG_DEBUG_SLAB |
Joe Perches | 756a025 | 2016-03-17 14:19:47 -0700 | [diff] [blame] | 1327 | seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); |
Glauber Costa | bcee6e2 | 2012-10-19 18:20:26 +0400 | [diff] [blame] | 1328 | seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); |
| 1329 | #endif |
| 1330 | seq_putc(m, '\n'); |
| 1331 | } |
| 1332 | |
Vladimir Davydov | 1df3b26 | 2014-12-10 15:42:16 -0800 | [diff] [blame] | 1333 | void *slab_start(struct seq_file *m, loff_t *pos) |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1334 | { |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1335 | mutex_lock(&slab_mutex); |
Tejun Heo | 510ded3 | 2017-02-22 15:41:24 -0800 | [diff] [blame] | 1336 | return seq_list_start(&slab_root_caches, *pos); |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1337 | } |
| 1338 | |
Wanpeng Li | 276a243 | 2013-07-08 08:08:28 +0800 | [diff] [blame] | 1339 | void *slab_next(struct seq_file *m, void *p, loff_t *pos) |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1340 | { |
Tejun Heo | 510ded3 | 2017-02-22 15:41:24 -0800 | [diff] [blame] | 1341 | return seq_list_next(p, &slab_root_caches, pos); |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1342 | } |
| 1343 | |
Wanpeng Li | 276a243 | 2013-07-08 08:08:28 +0800 | [diff] [blame] | 1344 | void slab_stop(struct seq_file *m, void *p) |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1345 | { |
| 1346 | mutex_unlock(&slab_mutex); |
| 1347 | } |
| 1348 | |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1349 | static void |
| 1350 | memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1351 | { |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1352 | struct kmem_cache *c; |
| 1353 | struct slabinfo sinfo; |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1354 | |
| 1355 | if (!is_root_cache(s)) |
| 1356 | return; |
| 1357 | |
Vladimir Davydov | 426589f | 2015-02-12 14:59:23 -0800 | [diff] [blame] | 1358 | for_each_memcg_cache(c, s) { |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1359 | memset(&sinfo, 0, sizeof(sinfo)); |
| 1360 | get_slabinfo(c, &sinfo); |
| 1361 | |
| 1362 | info->active_slabs += sinfo.active_slabs; |
| 1363 | info->num_slabs += sinfo.num_slabs; |
| 1364 | info->shared_avail += sinfo.shared_avail; |
| 1365 | info->active_objs += sinfo.active_objs; |
| 1366 | info->num_objs += sinfo.num_objs; |
| 1367 | } |
| 1368 | } |
| 1369 | |
Vladimir Davydov | b047501 | 2014-12-10 15:44:19 -0800 | [diff] [blame] | 1370 | static void cache_show(struct kmem_cache *s, struct seq_file *m) |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1371 | { |
Glauber Costa | 0d7561c | 2012-10-19 18:20:27 +0400 | [diff] [blame] | 1372 | struct slabinfo sinfo; |
| 1373 | |
| 1374 | memset(&sinfo, 0, sizeof(sinfo)); |
| 1375 | get_slabinfo(s, &sinfo); |
| 1376 | |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1377 | memcg_accumulate_slabinfo(s, &sinfo); |
| 1378 | |
Glauber Costa | 0d7561c | 2012-10-19 18:20:27 +0400 | [diff] [blame] | 1379 | seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1380 | cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size, |
Glauber Costa | 0d7561c | 2012-10-19 18:20:27 +0400 | [diff] [blame] | 1381 | sinfo.objects_per_slab, (1 << sinfo.cache_order)); |
| 1382 | |
| 1383 | seq_printf(m, " : tunables %4u %4u %4u", |
| 1384 | sinfo.limit, sinfo.batchcount, sinfo.shared); |
| 1385 | seq_printf(m, " : slabdata %6lu %6lu %6lu", |
| 1386 | sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail); |
| 1387 | slabinfo_show_stats(m, s); |
| 1388 | seq_putc(m, '\n'); |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1389 | } |
| 1390 | |
Vladimir Davydov | 1df3b26 | 2014-12-10 15:42:16 -0800 | [diff] [blame] | 1391 | static int slab_show(struct seq_file *m, void *p) |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1392 | { |
Tejun Heo | 510ded3 | 2017-02-22 15:41:24 -0800 | [diff] [blame] | 1393 | struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node); |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1394 | |
Tejun Heo | 510ded3 | 2017-02-22 15:41:24 -0800 | [diff] [blame] | 1395 | if (p == slab_root_caches.next) |
Vladimir Davydov | 1df3b26 | 2014-12-10 15:42:16 -0800 | [diff] [blame] | 1396 | print_slabinfo_header(m); |
Tejun Heo | 510ded3 | 2017-02-22 15:41:24 -0800 | [diff] [blame] | 1397 | cache_show(s, m); |
Vladimir Davydov | b047501 | 2014-12-10 15:44:19 -0800 | [diff] [blame] | 1398 | return 0; |
Glauber Costa | 749c541 | 2012-12-18 14:23:01 -0800 | [diff] [blame] | 1399 | } |
| 1400 | |
Yang Shi | 852d8be | 2017-11-15 17:32:07 -0800 | [diff] [blame] | 1401 | void dump_unreclaimable_slab(void) |
| 1402 | { |
| 1403 | struct kmem_cache *s, *s2; |
| 1404 | struct slabinfo sinfo; |
| 1405 | |
| 1406 | /* |
 | 1407 | * Acquiring slab_mutex here is risky since we don't want to sleep |
 | 1408 | * in the OOM path. But traversing the list without holding the |
 | 1409 | * mutex risks a crash. |
 | 1410 | * Use mutex_trylock to protect the list traversal, and dump nothing |
 | 1411 | * if the mutex cannot be acquired. |
| 1412 | */ |
| 1413 | if (!mutex_trylock(&slab_mutex)) { |
| 1414 | pr_warn("excessive unreclaimable slab but cannot dump stats\n"); |
| 1415 | return; |
| 1416 | } |
| 1417 | |
| 1418 | pr_info("Unreclaimable slab info:\n"); |
| 1419 | pr_info("Name Used Total\n"); |
| 1420 | |
| 1421 | list_for_each_entry_safe(s, s2, &slab_caches, list) { |
| 1422 | if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT)) |
| 1423 | continue; |
| 1424 | |
| 1425 | get_slabinfo(s, &sinfo); |
| 1426 | |
| 1427 | if (sinfo.num_objs > 0) |
| 1428 | pr_info("%-17s %10luKB %10luKB\n", cache_name(s), |
| 1429 | (sinfo.active_objs * s->size) / 1024, |
| 1430 | (sinfo.num_objs * s->size) / 1024); |
| 1431 | } |
| 1432 | mutex_unlock(&slab_mutex); |
| 1433 | } |
| 1434 | |
Yang Shi | 5b36577 | 2017-11-15 17:32:03 -0800 | [diff] [blame] | 1435 | #if defined(CONFIG_MEMCG) |
Tejun Heo | bc2791f | 2017-02-22 15:41:21 -0800 | [diff] [blame] | 1436 | void *memcg_slab_start(struct seq_file *m, loff_t *pos) |
Vladimir Davydov | b047501 | 2014-12-10 15:44:19 -0800 | [diff] [blame] | 1437 | { |
Chris Down | aa9694b | 2019-03-05 15:45:52 -0800 | [diff] [blame] | 1438 | struct mem_cgroup *memcg = mem_cgroup_from_seq(m); |
Vladimir Davydov | b047501 | 2014-12-10 15:44:19 -0800 | [diff] [blame] | 1439 | |
Tejun Heo | bc2791f | 2017-02-22 15:41:21 -0800 | [diff] [blame] | 1440 | mutex_lock(&slab_mutex); |
| 1441 | return seq_list_start(&memcg->kmem_caches, *pos); |
| 1442 | } |
| 1443 | |
| 1444 | void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos) |
| 1445 | { |
Chris Down | aa9694b | 2019-03-05 15:45:52 -0800 | [diff] [blame] | 1446 | struct mem_cgroup *memcg = mem_cgroup_from_seq(m); |
Tejun Heo | bc2791f | 2017-02-22 15:41:21 -0800 | [diff] [blame] | 1447 | |
| 1448 | return seq_list_next(p, &memcg->kmem_caches, pos); |
| 1449 | } |
| 1450 | |
| 1451 | void memcg_slab_stop(struct seq_file *m, void *p) |
| 1452 | { |
| 1453 | mutex_unlock(&slab_mutex); |
| 1454 | } |
| 1455 | |
| 1456 | int memcg_slab_show(struct seq_file *m, void *p) |
| 1457 | { |
| 1458 | struct kmem_cache *s = list_entry(p, struct kmem_cache, |
| 1459 | memcg_params.kmem_caches_node); |
Chris Down | aa9694b | 2019-03-05 15:45:52 -0800 | [diff] [blame] | 1460 | struct mem_cgroup *memcg = mem_cgroup_from_seq(m); |
Tejun Heo | bc2791f | 2017-02-22 15:41:21 -0800 | [diff] [blame] | 1461 | |
| 1462 | if (p == memcg->kmem_caches.next) |
Vladimir Davydov | b047501 | 2014-12-10 15:44:19 -0800 | [diff] [blame] | 1463 | print_slabinfo_header(m); |
Tejun Heo | bc2791f | 2017-02-22 15:41:21 -0800 | [diff] [blame] | 1464 | cache_show(s, m); |
Vladimir Davydov | b047501 | 2014-12-10 15:44:19 -0800 | [diff] [blame] | 1465 | return 0; |
| 1466 | } |
| 1467 | #endif |
| 1468 | |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1469 | /* |
| 1470 | * slabinfo_op - iterator that generates /proc/slabinfo |
| 1471 | * |
| 1472 | * Output layout: |
| 1473 | * cache-name |
| 1474 | * num-active-objs |
| 1475 | * total-objs |
| 1476 | * object size |
| 1477 | * num-active-slabs |
| 1478 | * total-slabs |
| 1479 | * num-pages-per-slab |
| 1480 | * + further values on SMP and with statistics enabled |
| 1481 | */ |
| 1482 | static const struct seq_operations slabinfo_op = { |
Vladimir Davydov | 1df3b26 | 2014-12-10 15:42:16 -0800 | [diff] [blame] | 1483 | .start = slab_start, |
Wanpeng Li | 276a243 | 2013-07-08 08:08:28 +0800 | [diff] [blame] | 1484 | .next = slab_next, |
| 1485 | .stop = slab_stop, |
Vladimir Davydov | 1df3b26 | 2014-12-10 15:42:16 -0800 | [diff] [blame] | 1486 | .show = slab_show, |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1487 | }; |
| 1488 | |
| 1489 | static int slabinfo_open(struct inode *inode, struct file *file) |
| 1490 | { |
| 1491 | return seq_open(file, &slabinfo_op); |
| 1492 | } |
| 1493 | |
| 1494 | static const struct file_operations proc_slabinfo_operations = { |
| 1495 | .open = slabinfo_open, |
| 1496 | .read = seq_read, |
| 1497 | .write = slabinfo_write, |
| 1498 | .llseek = seq_lseek, |
| 1499 | .release = seq_release, |
| 1500 | }; |
| 1501 | |
| 1502 | static int __init slab_proc_init(void) |
| 1503 | { |
Wanpeng Li | e9b4db2 | 2013-07-04 08:33:24 +0800 | [diff] [blame] | 1504 | proc_create("slabinfo", SLABINFO_RIGHTS, NULL, |
| 1505 | &proc_slabinfo_operations); |
Glauber Costa | b7454ad | 2012-10-19 18:20:25 +0400 | [diff] [blame] | 1506 | return 0; |
| 1507 | } |
| 1508 | module_init(slab_proc_init); |
Yang Shi | 5b36577 | 2017-11-15 17:32:03 -0800 | [diff] [blame] | 1509 | #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */ |
Andrey Ryabinin | 928cec9 | 2014-08-06 16:04:44 -0700 | [diff] [blame] | 1510 | |
| 1511 | static __always_inline void *__do_krealloc(const void *p, size_t new_size, |
| 1512 | gfp_t flags) |
| 1513 | { |
| 1514 | void *ret; |
| 1515 | size_t ks = 0; |
| 1516 | |
| 1517 | if (p) |
| 1518 | ks = ksize(p); |
| 1519 | |
Andrey Ryabinin | 0316bec | 2015-02-13 14:39:42 -0800 | [diff] [blame] | 1520 | if (ks >= new_size) { |
Andrey Konovalov | 0116523 | 2018-12-28 00:29:37 -0800 | [diff] [blame] | 1521 | p = kasan_krealloc((void *)p, new_size, flags); |
Andrey Ryabinin | 928cec9 | 2014-08-06 16:04:44 -0700 | [diff] [blame] | 1522 | return (void *)p; |
Andrey Ryabinin | 0316bec | 2015-02-13 14:39:42 -0800 | [diff] [blame] | 1523 | } |
Andrey Ryabinin | 928cec9 | 2014-08-06 16:04:44 -0700 | [diff] [blame] | 1524 | |
| 1525 | ret = kmalloc_track_caller(new_size, flags); |
| 1526 | if (ret && p) |
| 1527 | memcpy(ret, p, ks); |
| 1528 | |
| 1529 | return ret; |
| 1530 | } |
| 1531 | |
| 1532 | /** |
| 1533 | * __krealloc - like krealloc() but don't free @p. |
| 1534 | * @p: object to reallocate memory for. |
| 1535 | * @new_size: how many bytes of memory are required. |
| 1536 | * @flags: the type of memory to allocate. |
| 1537 | * |
| 1538 | * This function is like krealloc() except it never frees the originally |
 | 1539 | * allocated buffer. Use this if you don't want to free the buffer immediately, |
 | 1540 | * for example, when freeing it via RCU. |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 1541 | * |
| 1542 | * Return: pointer to the allocated memory or %NULL in case of error |
Andrey Ryabinin | 928cec9 | 2014-08-06 16:04:44 -0700 | [diff] [blame] | 1543 | */ |
| 1544 | void *__krealloc(const void *p, size_t new_size, gfp_t flags) |
| 1545 | { |
| 1546 | if (unlikely(!new_size)) |
| 1547 | return ZERO_SIZE_PTR; |
| 1548 | |
| 1549 | return __do_krealloc(p, new_size, flags); |
| 1550 | |
| 1551 | } |
| 1552 | EXPORT_SYMBOL(__krealloc); |
| 1553 | |
| 1554 | /** |
| 1555 | * krealloc - reallocate memory. The contents will remain unchanged. |
| 1556 | * @p: object to reallocate memory for. |
| 1557 | * @new_size: how many bytes of memory are required. |
| 1558 | * @flags: the type of memory to allocate. |
| 1559 | * |
| 1560 | * The contents of the object pointed to are preserved up to the |
| 1561 | * lesser of the new and old sizes. If @p is %NULL, krealloc() |
| 1562 | * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a |
| 1563 | * %NULL pointer, the object pointed to is freed. |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 1564 | * |
| 1565 | * Return: pointer to the allocated memory or %NULL in case of error |
Andrey Ryabinin | 928cec9 | 2014-08-06 16:04:44 -0700 | [diff] [blame] | 1566 | */ |
| 1567 | void *krealloc(const void *p, size_t new_size, gfp_t flags) |
| 1568 | { |
| 1569 | void *ret; |
| 1570 | |
| 1571 | if (unlikely(!new_size)) { |
| 1572 | kfree(p); |
| 1573 | return ZERO_SIZE_PTR; |
| 1574 | } |
| 1575 | |
| 1576 | ret = __do_krealloc(p, new_size, flags); |
Andrey Konovalov | 772a2fa | 2018-12-28 00:30:35 -0800 | [diff] [blame] | 1577 | if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret)) |
Andrey Ryabinin | 928cec9 | 2014-08-06 16:04:44 -0700 | [diff] [blame] | 1578 | kfree(p); |
| 1579 | |
| 1580 | return ret; |
| 1581 | } |
| 1582 | EXPORT_SYMBOL(krealloc); |
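/*
 * Illustrative use (assumed caller code): growing a buffer while keeping its
 * contents. If the existing allocation is already large enough per ksize(),
 * the same pointer may come back without any copy.
 */
int *tbl = kmalloc_array(16, sizeof(*tbl), GFP_KERNEL);
/* ... fill tbl ... */
int *bigger = krealloc(tbl, 32 * sizeof(*tbl), GFP_KERNEL);
if (!bigger)
	kfree(tbl);	/* on failure the old buffer is left untouched */
else
	tbl = bigger;
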
| 1583 | |
| 1584 | /** |
| 1585 | * kzfree - like kfree but zero memory |
| 1586 | * @p: object to free memory of |
| 1587 | * |
| 1588 | * The memory of the object @p points to is zeroed before freed. |
| 1589 | * If @p is %NULL, kzfree() does nothing. |
| 1590 | * |
| 1591 | * Note: this function zeroes the whole allocated buffer which can be a good |
| 1592 | * deal bigger than the requested buffer size passed to kmalloc(). So be |
| 1593 | * careful when using this function in performance sensitive code. |
| 1594 | */ |
| 1595 | void kzfree(const void *p) |
| 1596 | { |
| 1597 | size_t ks; |
| 1598 | void *mem = (void *)p; |
| 1599 | |
| 1600 | if (unlikely(ZERO_OR_NULL_PTR(mem))) |
| 1601 | return; |
| 1602 | ks = ksize(mem); |
| 1603 | memset(mem, 0, ks); |
| 1604 | kfree(mem); |
| 1605 | } |
| 1606 | EXPORT_SYMBOL(kzfree); |
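/*
 * Illustrative use (assumed caller code): scrubbing sensitive data before
 * giving the memory back. Note that the whole ksize() region is cleared,
 * not just the bytes originally requested.
 */
u8 *key = kmalloc(key_len, GFP_KERNEL);
/* ... use key ... */
kzfree(key);	/* memset(key, 0, ksize(key)) followed by kfree(key) */
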
| 1607 | |
Marco Elver | 10d1f8c | 2019-07-11 20:54:14 -0700 | [diff] [blame] | 1608 | /** |
| 1609 | * ksize - get the actual amount of memory allocated for a given object |
| 1610 | * @objp: Pointer to the object |
| 1611 | * |
| 1612 | * kmalloc may internally round up allocations and return more memory |
| 1613 | * than requested. ksize() can be used to determine the actual amount of |
| 1614 | * memory allocated. The caller may use this additional memory, even though |
| 1615 | * a smaller amount of memory was initially specified with the kmalloc call. |
| 1616 | * The caller must guarantee that objp points to a valid object previously |
| 1617 | * allocated with either kmalloc() or kmem_cache_alloc(). The object |
| 1618 | * must not be freed during the duration of the call. |
| 1619 | * |
| 1620 | * Return: size of the actual memory used by @objp in bytes |
| 1621 | */ |
| 1622 | size_t ksize(const void *objp) |
| 1623 | { |
Marco Elver | 0d4ca4c | 2019-07-11 20:54:18 -0700 | [diff] [blame] | 1624 | size_t size; |
| 1625 | |
| 1626 | if (WARN_ON_ONCE(!objp)) |
| 1627 | return 0; |
| 1628 | /* |
| 1629 | * We need to check that the pointed to object is valid, and only then |
| 1630 | * unpoison the shadow memory below. We use __kasan_check_read(), to |
| 1631 | * generate a more useful report at the time ksize() is called (rather |
| 1632 | * than later where behaviour is undefined due to potential |
| 1633 | * use-after-free or double-free). |
| 1634 | * |
| 1635 | * If the pointed to memory is invalid we return 0, to avoid users of |
| 1636 | * ksize() writing to and potentially corrupting the memory region. |
| 1637 | * |
| 1638 | * We want to perform the check before __ksize(), to avoid potentially |
| 1639 | * crashing in __ksize() due to accessing invalid metadata. |
| 1640 | */ |
| 1641 | if (unlikely(objp == ZERO_SIZE_PTR) || !__kasan_check_read(objp, 1)) |
| 1642 | return 0; |
| 1643 | |
| 1644 | size = __ksize(objp); |
Marco Elver | 10d1f8c | 2019-07-11 20:54:14 -0700 | [diff] [blame] | 1645 | /* |
 | 1646 | * We assume that ksize callers could use the whole allocated area, |
 | 1647 | * so we need to unpoison this area. |
| 1648 | */ |
| 1649 | kasan_unpoison_shadow(objp, size); |
| 1650 | return size; |
| 1651 | } |
| 1652 | EXPORT_SYMBOL(ksize); |
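/*
 * Illustrative use (assumed caller code): opportunistically using the slack
 * that kmalloc() rounded the allocation up to.
 */
char *buf = kmalloc(100, GFP_KERNEL);
if (buf) {
	size_t real = ksize(buf);	/* e.g. 128 if served from kmalloc-128 */
	/* the caller may legitimately use all 'real' bytes of buf */
}
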
| 1653 | |
Andrey Ryabinin | 928cec9 | 2014-08-06 16:04:44 -0700 | [diff] [blame] | 1654 | /* Tracepoints definitions. */ |
| 1655 | EXPORT_TRACEPOINT_SYMBOL(kmalloc); |
| 1656 | EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); |
| 1657 | EXPORT_TRACEPOINT_SYMBOL(kmalloc_node); |
| 1658 | EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node); |
| 1659 | EXPORT_TRACEPOINT_SYMBOL(kfree); |
| 1660 | EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free); |
Howard McLauchlan | 4f6923fb | 2018-04-05 16:23:57 -0700 | [diff] [blame] | 1661 | |
| 1662 | int should_failslab(struct kmem_cache *s, gfp_t gfpflags) |
| 1663 | { |
| 1664 | if (__should_failslab(s, gfpflags)) |
| 1665 | return -ENOMEM; |
| 1666 | return 0; |
| 1667 | } |
| 1668 | ALLOW_ERROR_INJECTION(should_failslab, ERRNO); |