/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
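
/*
 * For illustration only (not part of the API): a zero-length request
 * yields ZERO_SIZE_PTR rather than NULL, so callers can tell "allocated
 * nothing" apart from "allocation failed":
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// p == ZERO_SIZE_PTR
 *	ZERO_OR_NULL_PTR(p);			// true; never dereference p
 *	kfree(p);				// safe no-op, as with NULL
 */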

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
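
/*
 * Typical cache lifecycle, as a hedged sketch (struct foo and foo_cache
 * are hypothetical; error handling for kmem_cache_create() elided):
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */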

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and, optionally, flags from the list above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
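
/*
 * For example (a sketch; any struct type works the same way):
 *
 *	struct kmem_cache *task_cache =
 *		KMEM_CACHE(task_struct, SLAB_PANIC | SLAB_ACCOUNT);
 *
 * creates a cache named "task_struct" using the struct's own size and
 * natural alignment.
 */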

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
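
/*
 * Illustrative sketch (struct foo and its "data" field are hypothetical):
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char data[64];	// only this field may cross to/from userspace
 *	};
 *	foo_cache = KMEM_CACHE_USERCOPY(foo, 0, data);
 */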

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
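
/*
 * Hedged usage sketch: krealloc() returns NULL on failure and leaves the
 * old buffer intact, so keep the old pointer until the call succeeds:
 *
 *	char *bigger = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!bigger)
 *		return -ENOMEM;	// buf is still valid here
 *	buf = bigger;
 */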

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}
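
/*
 * An illustrative mapping of the above (assuming CONFIG_ZONE_DMA):
 *
 *	kmalloc_type(GFP_KERNEL)			== KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)	== KMALLOC_RECLAIM
 *	kmalloc_type(GFP_DMA)				== KMALLOC_DMA
 *	kmalloc_type(GFP_DMA | __GFP_RECLAIMABLE)	== KMALLOC_DMA
 */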

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
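
/*
 * Hedged usage sketch (foo_cache is hypothetical): the bulk allocator
 * returns the number of objects placed in the array; current allocators
 * either satisfy the whole request or return 0. The same array can later
 * be freed in one call:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 16, objs) != 16)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, 16, objs);
 */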

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * It is also possible to modify these flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but fail
 *	eventually.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
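
/*
 * A hedged usage sketch ("struct foo" is hypothetical); allocation can
 * fail, so always check the result:
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */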

/*
 * Determine size used for the nth kmalloc cache.
 * Returns the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches. While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
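
/*
 * Hedged sketch: prefer these helpers over an open-coded n * size
 * multiplication, which can overflow; they return NULL instead of
 * wrapping around ("table" and "nr_entries" are hypothetical):
 *
 *	struct foo *table = kcalloc(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 */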

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
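
/*
 * Hedged sketch: kzalloc() is simply kmalloc() plus __GFP_ZERO, so the
 * two calls below are equivalent:
 *
 *	p = kzalloc(len, GFP_KERNEL);
 *	p = kmalloc(len, GFP_KERNEL | __GFP_ZERO);
 */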

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */