blob: d3608d15fbe4c92968cc9a41977bf6d4d5f426d4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/mm/slab.c
3 * Written by Mark Hemment, 1996/97.
4 * (markhe@nextd.demon.co.uk)
5 *
6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7 *
8 * Major cleanup, different bufctl logic, per-cpu arrays
9 * (c) 2000 Manfred Spraul
10 *
11 * Cleanup, make the head arrays unconditional, preparation for NUMA
12 * (c) 2002 Manfred Spraul
13 *
14 * An implementation of the Slab Allocator as described in outline in;
15 * UNIX Internals: The New Frontiers by Uresh Vahalia
16 * Pub: Prentice Hall ISBN 0-13-101908-2
17 * or with a little more detail in;
18 * The Slab Allocator: An Object-Caching Kernel Memory Allocator
19 * Jeff Bonwick (Sun Microsystems).
20 * Presented at: USENIX Summer 1994 Technical Conference
21 *
22 * The memory is organized in caches, one cache for each object type.
23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24 * Each cache consists out of many slabs (they are small (usually one
25 * page long) and always contiguous), and each slab contains multiple
26 * initialized objects.
27 *
28 * This means, that your constructor is used only for newly allocated
Simon Arlott183ff222007-10-20 01:27:18 +020029 * slabs and you must pass objects with the same initializations to
Linus Torvalds1da177e2005-04-16 15:20:36 -070030 * kmem_cache_free.
31 *
32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33 * normal). If you need a special memory type, then must create a new
34 * cache for that memory type.
35 *
36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37 * full slabs with 0 free objects
38 * partial slabs
39 * empty slabs with no allocated objects
40 *
41 * If partial slabs exist, then new allocations come from these slabs,
42 * otherwise from empty slabs or new slabs are allocated.
43 *
44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46 *
47 * Each cache has a short per-cpu head array, most allocs
48 * and frees go into that array, and if that array overflows, then 1/2
49 * of the entries in the array are given back into the global cache.
50 * The head array is strictly LIFO and should improve the cache hit rates.
51 * On SMP, it additionally reduces the spinlock operations.
52 *
Andrew Mortona737b3e2006-03-22 00:08:11 -080053 * The c_cpuarray may not be read with enabled local interrupts -
Linus Torvalds1da177e2005-04-16 15:20:36 -070054 * it's changed with a smp_call_function().
55 *
56 * SMP synchronization:
57 * constructors and destructors are called without any locking.
Pekka Enberg343e0d72006-02-01 03:05:50 -080058 * Several members in struct kmem_cache and struct slab never change, they
Linus Torvalds1da177e2005-04-16 15:20:36 -070059 * are accessed without any locking.
60 * The per-cpu arrays are never accessed from the wrong cpu, no locking,
61 * and local interrupts are disabled so slab code is preempt-safe.
62 * The non-constant members are protected with a per-cache irq spinlock.
63 *
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65 * in 2000 - many ideas in the current implementation are derived from
66 * his patch.
67 *
68 * Further notes from the original documentation:
69 *
70 * 11 April '97. Started multi-threading - markhe
Christoph Lameter18004c52012-07-06 15:25:12 -050071 * The global cache-chain is protected by the mutex 'slab_mutex'.
Linus Torvalds1da177e2005-04-16 15:20:36 -070072 * The sem is only needed when accessing/extending the cache-chain, which
73 * can never happen inside an interrupt (kmem_cache_create(),
74 * kmem_cache_shrink() and kmem_cache_reap()).
75 *
76 * At present, each engine can be growing a cache. This should be blocked.
77 *
Christoph Lametere498be72005-09-09 13:03:32 -070078 * 15 March 2005. NUMA slab allocator.
79 * Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com>
81 * Alok N Kataria <alokk@calsoftinc.com>
82 * Christoph Lameter <christoph@lameter.com>
83 *
84 * Modified the slab allocator to be node aware on NUMA systems.
85 * Each node has its own list of partial, free and full slabs.
86 * All object allocations for a node occur from node specific slab lists.
Linus Torvalds1da177e2005-04-16 15:20:36 -070087 */
88
Linus Torvalds1da177e2005-04-16 15:20:36 -070089#include <linux/slab.h>
90#include <linux/mm.h>
Randy Dunlapc9cf5522006-06-27 02:53:52 -070091#include <linux/poison.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070092#include <linux/swap.h>
93#include <linux/cache.h>
94#include <linux/interrupt.h>
95#include <linux/init.h>
96#include <linux/compiler.h>
Paul Jackson101a5002006-03-24 03:16:07 -080097#include <linux/cpuset.h>
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +040098#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070099#include <linux/seq_file.h>
100#include <linux/notifier.h>
101#include <linux/kallsyms.h>
102#include <linux/cpu.h>
103#include <linux/sysctl.h>
104#include <linux/module.h>
105#include <linux/rcupdate.h>
Paulo Marques543537b2005-06-23 00:09:02 -0700106#include <linux/string.h>
Andrew Morton138ae662006-12-06 20:36:41 -0800107#include <linux/uaccess.h>
Christoph Lametere498be72005-09-09 13:03:32 -0700108#include <linux/nodemask.h>
Catalin Marinasd5cff632009-06-11 13:22:40 +0100109#include <linux/kmemleak.h>
Christoph Lameterdc85da12006-01-18 17:42:36 -0800110#include <linux/mempolicy.h>
Ingo Molnarfc0abb12006-01-18 17:42:33 -0800111#include <linux/mutex.h>
Akinobu Mita8a8b6502006-12-08 02:39:44 -0800112#include <linux/fault-inject.h>
Ingo Molnare7eebaf2006-06-27 02:54:55 -0700113#include <linux/rtmutex.h>
Eric Dumazet6a2d7a92006-12-13 00:34:27 -0800114#include <linux/reciprocal_div.h>
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -0700115#include <linux/debugobjects.h>
Pekka Enbergc175eea2008-05-09 20:35:53 +0200116#include <linux/kmemcheck.h>
David Rientjes8f9f8d92010-03-27 19:40:47 -0700117#include <linux/memory.h>
Linus Torvalds268bb0c2011-05-20 12:50:29 -0700118#include <linux/prefetch.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
Mel Gorman381760e2012-07-31 16:44:30 -0700120#include <net/sock.h>
121
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122#include <asm/cacheflush.h>
123#include <asm/tlbflush.h>
124#include <asm/page.h>
125
Steven Rostedt4dee6b62012-01-09 17:15:42 -0500126#include <trace/events/kmem.h>
127
Mel Gorman072bb0a2012-07-31 16:43:58 -0700128#include "internal.h"
129
Glauber Costab9ce5ef2012-12-18 14:22:46 -0800130#include "slab.h"
131
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132/*
Christoph Lameter50953fe2007-05-06 14:50:16 -0700133 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134 * 0 for faster, smaller code (especially in the critical paths).
135 *
136 * STATS - 1 to collect stats for /proc/slabinfo.
137 * 0 for faster, smaller code (especially in the critical paths).
138 *
139 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
140 */
141
142#ifdef CONFIG_DEBUG_SLAB
143#define DEBUG 1
144#define STATS 1
145#define FORCED_DEBUG 1
146#else
147#define DEBUG 0
148#define STATS 0
149#define FORCED_DEBUG 0
150#endif
151
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152/* Shouldn't this be in a header file somewhere? */
153#define BYTES_PER_WORD sizeof(void *)
David Woodhouse87a927c2007-07-04 21:26:44 -0400154#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156#ifndef ARCH_KMALLOC_FLAGS
157#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
158#endif
159
Joonsoo Kimf315e3f2013-12-02 17:49:41 +0900160#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
161 <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
162
163#if FREELIST_BYTE_INDEX
164typedef unsigned char freelist_idx_t;
165#else
166typedef unsigned short freelist_idx_t;
167#endif
168
David Miller30321c72014-05-05 16:20:04 -0400169#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
Joonsoo Kimf315e3f2013-12-02 17:49:41 +0900170
Mel Gorman072bb0a2012-07-31 16:43:58 -0700171/*
172 * true if a page was allocated from pfmemalloc reserves for network-based
173 * swap
174 */
175static bool pfmemalloc_active __read_mostly;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178 * struct array_cache
179 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180 * Purpose:
181 * - LIFO ordering, to hand out cache-warm objects from _alloc
182 * - reduce the number of linked list operations
183 * - reduce spinlock operations
184 *
185 * The limit is stored in the per-cpu structure to reduce the data cache
186 * footprint.
187 *
188 */
189struct array_cache {
190 unsigned int avail;
191 unsigned int limit;
192 unsigned int batchcount;
193 unsigned int touched;
Robert P. J. Daybda5b652007-10-16 23:30:05 -0700194 void *entry[]; /*
Andrew Mortona737b3e2006-03-22 00:08:11 -0800195 * Must have this definition in here for the proper
196 * alignment of array_cache. Also simplifies accessing
197 * the entries.
Mel Gorman072bb0a2012-07-31 16:43:58 -0700198 *
199 * Entries should not be directly dereferenced as
200 * entries belonging to slabs marked pfmemalloc will
201 * have the lower bits set SLAB_OBJ_PFMEMALLOC
Andrew Mortona737b3e2006-03-22 00:08:11 -0800202 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203};
204
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700205struct alien_cache {
206 spinlock_t lock;
207 struct array_cache ac;
208};
209
Mel Gorman072bb0a2012-07-31 16:43:58 -0700210#define SLAB_OBJ_PFMEMALLOC 1
211static inline bool is_obj_pfmemalloc(void *objp)
212{
213 return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
214}
215
216static inline void set_obj_pfmemalloc(void **objp)
217{
218 *objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
219 return;
220}
221
222static inline void clear_obj_pfmemalloc(void **objp)
223{
224 *objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
225}
226
Andrew Mortona737b3e2006-03-22 00:08:11 -0800227/*
Christoph Lametere498be72005-09-09 13:03:32 -0700228 * Need this for bootstrapping a per node allocator.
229 */
Joonsoo Kimbf0dea22014-10-09 15:26:27 -0700230#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000231static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
Christoph Lametere498be72005-09-09 13:03:32 -0700232#define CACHE_CACHE 0
Joonsoo Kimbf0dea22014-10-09 15:26:27 -0700233#define SIZE_NODE (MAX_NUMNODES)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234
Christoph Lametered11d9e2006-06-30 01:55:45 -0700235static int drain_freelist(struct kmem_cache *cache,
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000236 struct kmem_cache_node *n, int tofree);
Christoph Lametered11d9e2006-06-30 01:55:45 -0700237static void free_block(struct kmem_cache *cachep, void **objpp, int len,
Joonsoo Kim97654df2014-08-06 16:04:25 -0700238 int node, struct list_head *list);
239static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
Pekka Enberg83b519e2009-06-10 19:40:04 +0300240static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
David Howells65f27f32006-11-22 14:55:48 +0000241static void cache_reap(struct work_struct *unused);
Christoph Lametered11d9e2006-06-30 01:55:45 -0700242
Ingo Molnare0a42722006-06-23 02:03:46 -0700243static int slab_early_init = 1;
244
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000245#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
Christoph Lametere498be72005-09-09 13:03:32 -0700246
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000247static void kmem_cache_node_init(struct kmem_cache_node *parent)
Christoph Lametere498be72005-09-09 13:03:32 -0700248{
249 INIT_LIST_HEAD(&parent->slabs_full);
250 INIT_LIST_HEAD(&parent->slabs_partial);
251 INIT_LIST_HEAD(&parent->slabs_free);
252 parent->shared = NULL;
253 parent->alien = NULL;
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -0800254 parent->colour_next = 0;
Christoph Lametere498be72005-09-09 13:03:32 -0700255 spin_lock_init(&parent->list_lock);
256 parent->free_objects = 0;
257 parent->free_touched = 0;
258}
259
Andrew Mortona737b3e2006-03-22 00:08:11 -0800260#define MAKE_LIST(cachep, listp, slab, nodeid) \
261 do { \
262 INIT_LIST_HEAD(listp); \
Christoph Lameter18bf8542014-08-06 16:04:11 -0700263 list_splice(&get_node(cachep, nodeid)->slab, listp); \
Christoph Lametere498be72005-09-09 13:03:32 -0700264 } while (0)
265
Andrew Mortona737b3e2006-03-22 00:08:11 -0800266#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
267 do { \
Christoph Lametere498be72005-09-09 13:03:32 -0700268 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
269 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
270 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
271 } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273#define CFLGS_OFF_SLAB (0x80000000UL)
274#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
275
276#define BATCHREFILL_LIMIT 16
Andrew Mortona737b3e2006-03-22 00:08:11 -0800277/*
278 * Optimization question: fewer reaps means less probability for unnessary
279 * cpucache drain/refill cycles.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 *
Adrian Bunkdc6f3f22005-11-08 16:44:08 +0100281 * OTOH the cpuarrays can contain lots of objects,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 * which could lock up otherwise freeable slabs.
283 */
Jianyu Zhan5f0985b2014-03-30 17:02:20 +0800284#define REAPTIMEOUT_AC (2*HZ)
285#define REAPTIMEOUT_NODE (4*HZ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286
287#if STATS
288#define STATS_INC_ACTIVE(x) ((x)->num_active++)
289#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
290#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
291#define STATS_INC_GROWN(x) ((x)->grown++)
Christoph Lametered11d9e2006-06-30 01:55:45 -0700292#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
Andrew Mortona737b3e2006-03-22 00:08:11 -0800293#define STATS_SET_HIGH(x) \
294 do { \
295 if ((x)->num_active > (x)->high_mark) \
296 (x)->high_mark = (x)->num_active; \
297 } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298#define STATS_INC_ERR(x) ((x)->errors++)
299#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
Christoph Lametere498be72005-09-09 13:03:32 -0700300#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -0700301#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
Andrew Mortona737b3e2006-03-22 00:08:11 -0800302#define STATS_SET_FREEABLE(x, i) \
303 do { \
304 if ((x)->max_freeable < i) \
305 (x)->max_freeable = i; \
306 } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
308#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
309#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
310#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
311#else
312#define STATS_INC_ACTIVE(x) do { } while (0)
313#define STATS_DEC_ACTIVE(x) do { } while (0)
314#define STATS_INC_ALLOCED(x) do { } while (0)
315#define STATS_INC_GROWN(x) do { } while (0)
Andi Kleen4e60c862010-08-09 17:19:03 -0700316#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317#define STATS_SET_HIGH(x) do { } while (0)
318#define STATS_INC_ERR(x) do { } while (0)
319#define STATS_INC_NODEALLOCS(x) do { } while (0)
Christoph Lametere498be72005-09-09 13:03:32 -0700320#define STATS_INC_NODEFREES(x) do { } while (0)
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -0700321#define STATS_INC_ACOVERFLOW(x) do { } while (0)
Andrew Mortona737b3e2006-03-22 00:08:11 -0800322#define STATS_SET_FREEABLE(x, i) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323#define STATS_INC_ALLOCHIT(x) do { } while (0)
324#define STATS_INC_ALLOCMISS(x) do { } while (0)
325#define STATS_INC_FREEHIT(x) do { } while (0)
326#define STATS_INC_FREEMISS(x) do { } while (0)
327#endif
328
329#if DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330
Andrew Mortona737b3e2006-03-22 00:08:11 -0800331/*
332 * memory layout of objects:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333 * 0 : objp
Manfred Spraul3dafccf2006-02-01 03:05:42 -0800334 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 * the end of an object is aligned with the end of the real
336 * allocation. Catches writes behind the end of the allocation.
Manfred Spraul3dafccf2006-02-01 03:05:42 -0800337 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 * redzone word.
Manfred Spraul3dafccf2006-02-01 03:05:42 -0800339 * cachep->obj_offset: The real object.
Christoph Lameter3b0efdf2012-06-13 10:24:57 -0500340 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
341 * cachep->size - 1* BYTES_PER_WORD: last caller address
Andrew Mortona737b3e2006-03-22 00:08:11 -0800342 * [BYTES_PER_WORD long]
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343 */
Pekka Enberg343e0d72006-02-01 03:05:50 -0800344static int obj_offset(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345{
Manfred Spraul3dafccf2006-02-01 03:05:42 -0800346 return cachep->obj_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347}
348
David Woodhouseb46b8f12007-05-08 00:22:59 -0700349static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350{
351 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
David Woodhouseb46b8f12007-05-08 00:22:59 -0700352 return (unsigned long long*) (objp + obj_offset(cachep) -
353 sizeof(unsigned long long));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354}
355
David Woodhouseb46b8f12007-05-08 00:22:59 -0700356static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357{
358 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
359 if (cachep->flags & SLAB_STORE_USER)
Christoph Lameter3b0efdf2012-06-13 10:24:57 -0500360 return (unsigned long long *)(objp + cachep->size -
David Woodhouseb46b8f12007-05-08 00:22:59 -0700361 sizeof(unsigned long long) -
David Woodhouse87a927c2007-07-04 21:26:44 -0400362 REDZONE_ALIGN);
Christoph Lameter3b0efdf2012-06-13 10:24:57 -0500363 return (unsigned long long *) (objp + cachep->size -
David Woodhouseb46b8f12007-05-08 00:22:59 -0700364 sizeof(unsigned long long));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365}
366
Pekka Enberg343e0d72006-02-01 03:05:50 -0800367static void **dbg_userword(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368{
369 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
Christoph Lameter3b0efdf2012-06-13 10:24:57 -0500370 return (void **)(objp + cachep->size - BYTES_PER_WORD);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371}
372
373#else
374
Manfred Spraul3dafccf2006-02-01 03:05:42 -0800375#define obj_offset(x) 0
David Woodhouseb46b8f12007-05-08 00:22:59 -0700376#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
377#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
379
380#endif
381
Joonsoo Kim03787302014-06-23 13:22:06 -0700382#ifdef CONFIG_DEBUG_SLAB_LEAK
383
Joonsoo Kimd31676d2016-03-15 14:54:24 -0700384static inline bool is_store_user_clean(struct kmem_cache *cachep)
Joonsoo Kim03787302014-06-23 13:22:06 -0700385{
Joonsoo Kimd31676d2016-03-15 14:54:24 -0700386 return atomic_read(&cachep->store_user_clean) == 1;
387}
Joonsoo Kim03787302014-06-23 13:22:06 -0700388
Joonsoo Kimd31676d2016-03-15 14:54:24 -0700389static inline void set_store_user_clean(struct kmem_cache *cachep)
390{
391 atomic_set(&cachep->store_user_clean, 1);
392}
Joonsoo Kim03787302014-06-23 13:22:06 -0700393
Joonsoo Kimd31676d2016-03-15 14:54:24 -0700394static inline void set_store_user_dirty(struct kmem_cache *cachep)
395{
396 if (is_store_user_clean(cachep))
397 atomic_set(&cachep->store_user_clean, 0);
Joonsoo Kim03787302014-06-23 13:22:06 -0700398}
399
400#else
Joonsoo Kimd31676d2016-03-15 14:54:24 -0700401static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
Joonsoo Kim03787302014-06-23 13:22:06 -0700402
403#endif
404
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405/*
David Rientjes3df1ccc2011-10-18 22:09:28 -0700406 * Do not go above this order unless 0 objects fit into the slab or
407 * overridden on the command line.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 */
David Rientjes543585c2011-10-18 22:09:24 -0700409#define SLAB_MAX_ORDER_HI 1
410#define SLAB_MAX_ORDER_LO 0
411static int slab_max_order = SLAB_MAX_ORDER_LO;
David Rientjes3df1ccc2011-10-18 22:09:28 -0700412static bool slab_max_order_set __initdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413
Pekka Enberg6ed5eb2212006-02-01 03:05:49 -0800414static inline struct kmem_cache *virt_to_cache(const void *obj)
415{
Christoph Lameterb49af682007-05-06 14:49:41 -0700416 struct page *page = virt_to_head_page(obj);
Christoph Lameter35026082012-06-13 10:24:56 -0500417 return page->slab_cache;
Pekka Enberg6ed5eb2212006-02-01 03:05:49 -0800418}
419
Joonsoo Kim8456a642013-10-24 10:07:49 +0900420static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
Pekka Enberg8fea4e92006-03-22 00:08:10 -0800421 unsigned int idx)
422{
Joonsoo Kim8456a642013-10-24 10:07:49 +0900423 return page->s_mem + cache->size * idx;
Pekka Enberg8fea4e92006-03-22 00:08:10 -0800424}
425
Eric Dumazet6a2d7a92006-12-13 00:34:27 -0800426/*
Christoph Lameter3b0efdf2012-06-13 10:24:57 -0500427 * We want to avoid an expensive divide : (offset / cache->size)
428 * Using the fact that size is a constant for a particular cache,
429 * we can replace (offset / cache->size) by
Eric Dumazet6a2d7a92006-12-13 00:34:27 -0800430 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
431 */
432static inline unsigned int obj_to_index(const struct kmem_cache *cache,
Joonsoo Kim8456a642013-10-24 10:07:49 +0900433 const struct page *page, void *obj)
Pekka Enberg8fea4e92006-03-22 00:08:10 -0800434{
Joonsoo Kim8456a642013-10-24 10:07:49 +0900435 u32 offset = (obj - page->s_mem);
Eric Dumazet6a2d7a92006-12-13 00:34:27 -0800436 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
Pekka Enberg8fea4e92006-03-22 00:08:10 -0800437}
438
Joonsoo Kim6fb92432016-03-15 14:54:09 -0700439#define BOOT_CPUCACHE_ENTRIES 1
Linus Torvalds1da177e2005-04-16 15:20:36 -0700440/* internal cache of cache description objs */
Christoph Lameter9b030cb2012-09-05 00:20:33 +0000441static struct kmem_cache kmem_cache_boot = {
Pekka Enbergb28a02d2006-01-08 01:00:37 -0800442 .batchcount = 1,
443 .limit = BOOT_CPUCACHE_ENTRIES,
444 .shared = 1,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -0500445 .size = sizeof(struct kmem_cache),
Pekka Enbergb28a02d2006-01-08 01:00:37 -0800446 .name = "kmem_cache",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447};
448
Joonsoo Kimedcad252014-08-08 14:19:15 -0700449#define BAD_ALIEN_MAGIC 0x01020304ul
450
Tejun Heo1871e522009-10-29 22:34:13 +0900451static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452
Pekka Enberg343e0d72006-02-01 03:05:50 -0800453static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454{
Joonsoo Kimbf0dea22014-10-09 15:26:27 -0700455 return this_cpu_ptr(cachep->cpu_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700456}
457
Andrew Mortona737b3e2006-03-22 00:08:11 -0800458/*
459 * Calculate the number of objects and left-over bytes for a given buffer size.
460 */
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800461static void cache_estimate(unsigned long gfporder, size_t buffer_size,
Joonsoo Kim2e6b3602016-03-15 14:54:30 -0700462 unsigned long flags, size_t *left_over, unsigned int *num)
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800463{
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800464 size_t slab_size = PAGE_SIZE << gfporder;
465
466 /*
467 * The slab management structure can be either off the slab or
468 * on it. For the latter case, the memory allocated for a
469 * slab is used for:
470 *
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800471 * - @buffer_size bytes for each object
Joonsoo Kim2e6b3602016-03-15 14:54:30 -0700472 * - One freelist_idx_t for each object
473 *
474 * We don't need to consider alignment of freelist because
475 * freelist will be at the end of slab page. The objects will be
476 * at the correct alignment.
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800477 *
478 * If the slab management structure is off the slab, then the
479 * alignment will already be calculated into the size. Because
480 * the slabs are all pages aligned, the objects will be at the
481 * correct alignment when allocated.
482 */
483 if (flags & CFLGS_OFF_SLAB) {
Joonsoo Kim2e6b3602016-03-15 14:54:30 -0700484 *num = slab_size / buffer_size;
485 *left_over = slab_size % buffer_size;
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800486 } else {
Joonsoo Kim2e6b3602016-03-15 14:54:30 -0700487 *num = slab_size / (buffer_size + sizeof(freelist_idx_t));
488 *left_over = slab_size %
489 (buffer_size + sizeof(freelist_idx_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491}
492
Christoph Lameterf28510d2012-09-11 19:49:38 +0000493#if DEBUG
Harvey Harrisond40cee22008-04-30 00:55:07 -0700494#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495
Andrew Mortona737b3e2006-03-22 00:08:11 -0800496static void __slab_error(const char *function, struct kmem_cache *cachep,
497 char *msg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498{
499 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -0800500 function, cachep->name, msg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 dump_stack();
Rusty Russell373d4d02013-01-21 17:17:39 +1030502 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503}
Christoph Lameterf28510d2012-09-11 19:49:38 +0000504#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505
Paul Menage3395ee02006-12-06 20:32:16 -0800506/*
507 * By default on NUMA we use alien caches to stage the freeing of
508 * objects allocated from other nodes. This causes massive memory
509 * inefficiencies when using fake NUMA setup to split memory into a
510 * large number of small nodes, so it can be disabled on the command
511 * line
512 */
513
514static int use_alien_caches __read_mostly = 1;
515static int __init noaliencache_setup(char *s)
516{
517 use_alien_caches = 0;
518 return 1;
519}
520__setup("noaliencache", noaliencache_setup);
521
David Rientjes3df1ccc2011-10-18 22:09:28 -0700522static int __init slab_max_order_setup(char *str)
523{
524 get_option(&str, &slab_max_order);
525 slab_max_order = slab_max_order < 0 ? 0 :
526 min(slab_max_order, MAX_ORDER - 1);
527 slab_max_order_set = true;
528
529 return 1;
530}
531__setup("slab_max_order=", slab_max_order_setup);
532
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800533#ifdef CONFIG_NUMA
534/*
535 * Special reaping functions for NUMA systems called from cache_reap().
536 * These take care of doing round robin flushing of alien caches (containing
537 * objects freed on different nodes from which they were allocated) and the
538 * flushing of remote pcps by calling drain_node_pages.
539 */
Tejun Heo1871e522009-10-29 22:34:13 +0900540static DEFINE_PER_CPU(unsigned long, slab_reap_node);
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800541
542static void init_reap_node(int cpu)
543{
544 int node;
545
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -0700546 node = next_node(cpu_to_mem(cpu), node_online_map);
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800547 if (node == MAX_NUMNODES)
Paul Jackson442295c2006-03-22 00:09:11 -0800548 node = first_node(node_online_map);
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800549
Tejun Heo1871e522009-10-29 22:34:13 +0900550 per_cpu(slab_reap_node, cpu) = node;
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800551}
552
553static void next_reap_node(void)
554{
Christoph Lameter909ea962010-12-08 16:22:55 +0100555 int node = __this_cpu_read(slab_reap_node);
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800556
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800557 node = next_node(node, node_online_map);
558 if (unlikely(node >= MAX_NUMNODES))
559 node = first_node(node_online_map);
Christoph Lameter909ea962010-12-08 16:22:55 +0100560 __this_cpu_write(slab_reap_node, node);
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800561}
562
563#else
564#define init_reap_node(cpu) do { } while (0)
565#define next_reap_node(void) do { } while (0)
566#endif
567
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568/*
569 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
570 * via the workqueue/eventd.
571 * Add the CPU number into the expiration time to minimize the possibility of
572 * the CPUs getting into lockstep and contending for the global cache chain
573 * lock.
574 */
Paul Gortmaker0db06282013-06-19 14:53:51 -0400575static void start_cpu_timer(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576{
Tejun Heo1871e522009-10-29 22:34:13 +0900577 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578
579 /*
580 * When this gets called from do_initcalls via cpucache_init(),
581 * init_workqueues() has already run, so keventd will be setup
582 * at that time.
583 */
David Howells52bad642006-11-22 14:54:01 +0000584 if (keventd_up() && reap_work->work.func == NULL) {
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800585 init_reap_node(cpu);
Tejun Heo203b42f2012-08-21 13:18:23 -0700586 INIT_DEFERRABLE_WORK(reap_work, cache_reap);
Arjan van de Ven2b284212006-12-10 02:21:28 -0800587 schedule_delayed_work_on(cpu, reap_work,
588 __round_jiffies_relative(HZ, cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 }
590}
591
Joonsoo Kim1fe00d52014-08-06 16:04:27 -0700592static void init_arraycache(struct array_cache *ac, int limit, int batch)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593{
Catalin Marinasd5cff632009-06-11 13:22:40 +0100594 /*
595 * The array_cache structures contain pointers to free object.
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300596 * However, when such objects are allocated or transferred to another
Catalin Marinasd5cff632009-06-11 13:22:40 +0100597 * cache the pointers are not cleared and they could be counted as
598 * valid references during a kmemleak scan. Therefore, kmemleak must
599 * not scan such objects.
600 */
Joonsoo Kim1fe00d52014-08-06 16:04:27 -0700601 kmemleak_no_scan(ac);
602 if (ac) {
603 ac->avail = 0;
604 ac->limit = limit;
605 ac->batchcount = batch;
606 ac->touched = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 }
Joonsoo Kim1fe00d52014-08-06 16:04:27 -0700608}
609
610static struct array_cache *alloc_arraycache(int node, int entries,
611 int batchcount, gfp_t gfp)
612{
Joonsoo Kim5e804782014-08-06 16:04:40 -0700613 size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
Joonsoo Kim1fe00d52014-08-06 16:04:27 -0700614 struct array_cache *ac = NULL;
615
616 ac = kmalloc_node(memsize, gfp, node);
617 init_arraycache(ac, entries, batchcount);
618 return ac;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619}
620
Joonsoo Kim8456a642013-10-24 10:07:49 +0900621static inline bool is_slab_pfmemalloc(struct page *page)
Mel Gorman072bb0a2012-07-31 16:43:58 -0700622{
Mel Gorman072bb0a2012-07-31 16:43:58 -0700623 return PageSlabPfmemalloc(page);
624}
625
626/* Clears pfmemalloc_active if no slabs have pfmalloc set */
627static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
628 struct array_cache *ac)
629{
Christoph Lameter18bf8542014-08-06 16:04:11 -0700630 struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
Joonsoo Kim8456a642013-10-24 10:07:49 +0900631 struct page *page;
Mel Gorman072bb0a2012-07-31 16:43:58 -0700632 unsigned long flags;
633
634 if (!pfmemalloc_active)
635 return;
636
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000637 spin_lock_irqsave(&n->list_lock, flags);
Joonsoo Kim8456a642013-10-24 10:07:49 +0900638 list_for_each_entry(page, &n->slabs_full, lru)
639 if (is_slab_pfmemalloc(page))
Mel Gorman072bb0a2012-07-31 16:43:58 -0700640 goto out;
641
Joonsoo Kim8456a642013-10-24 10:07:49 +0900642 list_for_each_entry(page, &n->slabs_partial, lru)
643 if (is_slab_pfmemalloc(page))
Mel Gorman072bb0a2012-07-31 16:43:58 -0700644 goto out;
645
Joonsoo Kim8456a642013-10-24 10:07:49 +0900646 list_for_each_entry(page, &n->slabs_free, lru)
647 if (is_slab_pfmemalloc(page))
Mel Gorman072bb0a2012-07-31 16:43:58 -0700648 goto out;
649
650 pfmemalloc_active = false;
651out:
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000652 spin_unlock_irqrestore(&n->list_lock, flags);
Mel Gorman072bb0a2012-07-31 16:43:58 -0700653}
654
Mel Gorman381760e2012-07-31 16:44:30 -0700655static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
Mel Gorman072bb0a2012-07-31 16:43:58 -0700656 gfp_t flags, bool force_refill)
657{
658 int i;
659 void *objp = ac->entry[--ac->avail];
660
661 /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
662 if (unlikely(is_obj_pfmemalloc(objp))) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000663 struct kmem_cache_node *n;
Mel Gorman072bb0a2012-07-31 16:43:58 -0700664
665 if (gfp_pfmemalloc_allowed(flags)) {
666 clear_obj_pfmemalloc(&objp);
667 return objp;
668 }
669
670 /* The caller cannot use PFMEMALLOC objects, find another one */
Joonsoo Kimd014dc22012-09-17 14:09:06 -0700671 for (i = 0; i < ac->avail; i++) {
Mel Gorman072bb0a2012-07-31 16:43:58 -0700672 /* If a !PFMEMALLOC object is found, swap them */
673 if (!is_obj_pfmemalloc(ac->entry[i])) {
674 objp = ac->entry[i];
675 ac->entry[i] = ac->entry[ac->avail];
676 ac->entry[ac->avail] = objp;
677 return objp;
678 }
679 }
680
681 /*
682 * If there are empty slabs on the slabs_free list and we are
683 * being forced to refill the cache, mark this one !pfmemalloc.
684 */
Christoph Lameter18bf8542014-08-06 16:04:11 -0700685 n = get_node(cachep, numa_mem_id());
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000686 if (!list_empty(&n->slabs_free) && force_refill) {
Joonsoo Kim8456a642013-10-24 10:07:49 +0900687 struct page *page = virt_to_head_page(objp);
Joonsoo Kim7ecccf92013-10-24 10:07:50 +0900688 ClearPageSlabPfmemalloc(page);
Mel Gorman072bb0a2012-07-31 16:43:58 -0700689 clear_obj_pfmemalloc(&objp);
690 recheck_pfmemalloc_active(cachep, ac);
691 return objp;
692 }
693
694 /* No !PFMEMALLOC objects available */
695 ac->avail++;
696 objp = NULL;
697 }
698
699 return objp;
700}
701
Mel Gorman381760e2012-07-31 16:44:30 -0700702static inline void *ac_get_obj(struct kmem_cache *cachep,
703 struct array_cache *ac, gfp_t flags, bool force_refill)
704{
705 void *objp;
706
707 if (unlikely(sk_memalloc_socks()))
708 objp = __ac_get_obj(cachep, ac, flags, force_refill);
709 else
710 objp = ac->entry[--ac->avail];
711
712 return objp;
713}
714
Joonsoo Kimd3aec342014-10-09 15:26:06 -0700715static noinline void *__ac_put_obj(struct kmem_cache *cachep,
716 struct array_cache *ac, void *objp)
Mel Gorman072bb0a2012-07-31 16:43:58 -0700717{
718 if (unlikely(pfmemalloc_active)) {
719 /* Some pfmemalloc slabs exist, check if this is one */
Mel Gorman30c29be2012-09-17 14:09:03 -0700720 struct page *page = virt_to_head_page(objp);
Mel Gorman072bb0a2012-07-31 16:43:58 -0700721 if (PageSlabPfmemalloc(page))
722 set_obj_pfmemalloc(&objp);
723 }
724
Mel Gorman381760e2012-07-31 16:44:30 -0700725 return objp;
726}
727
728static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
729 void *objp)
730{
731 if (unlikely(sk_memalloc_socks()))
732 objp = __ac_put_obj(cachep, ac, objp);
733
Mel Gorman072bb0a2012-07-31 16:43:58 -0700734 ac->entry[ac->avail++] = objp;
735}
736
Christoph Lameter3ded1752006-03-25 03:06:44 -0800737/*
738 * Transfer objects in one arraycache to another.
739 * Locking must be handled by the caller.
740 *
741 * Return the number of entries transferred.
742 */
743static int transfer_objects(struct array_cache *to,
744 struct array_cache *from, unsigned int max)
745{
746 /* Figure out how many entries to transfer */
Hagen Paul Pfeifer732eacc2010-10-26 14:22:23 -0700747 int nr = min3(from->avail, max, to->limit - to->avail);
Christoph Lameter3ded1752006-03-25 03:06:44 -0800748
749 if (!nr)
750 return 0;
751
752 memcpy(to->entry + to->avail, from->entry + from->avail -nr,
753 sizeof(void *) *nr);
754
755 from->avail -= nr;
756 to->avail += nr;
Christoph Lameter3ded1752006-03-25 03:06:44 -0800757 return nr;
758}
759
Christoph Lameter765c4502006-09-27 01:50:08 -0700760#ifndef CONFIG_NUMA
761
762#define drain_alien_cache(cachep, alien) do { } while (0)
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000763#define reap_alien(cachep, n) do { } while (0)
Christoph Lameter765c4502006-09-27 01:50:08 -0700764
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700765static inline struct alien_cache **alloc_alien_cache(int node,
766 int limit, gfp_t gfp)
Christoph Lameter765c4502006-09-27 01:50:08 -0700767{
Joonsoo Kimedcad252014-08-08 14:19:15 -0700768 return (struct alien_cache **)BAD_ALIEN_MAGIC;
Christoph Lameter765c4502006-09-27 01:50:08 -0700769}
770
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700771static inline void free_alien_cache(struct alien_cache **ac_ptr)
Christoph Lameter765c4502006-09-27 01:50:08 -0700772{
773}
774
775static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
776{
777 return 0;
778}
779
780static inline void *alternate_node_alloc(struct kmem_cache *cachep,
781 gfp_t flags)
782{
783 return NULL;
784}
785
Christoph Hellwig8b98c162006-12-06 20:32:30 -0800786static inline void *____cache_alloc_node(struct kmem_cache *cachep,
Christoph Lameter765c4502006-09-27 01:50:08 -0700787 gfp_t flags, int nodeid)
788{
789 return NULL;
790}
791
David Rientjes4167e9b2015-04-14 15:46:55 -0700792static inline gfp_t gfp_exact_node(gfp_t flags)
793{
794 return flags;
795}
796
Christoph Lameter765c4502006-09-27 01:50:08 -0700797#else /* CONFIG_NUMA */
798
Christoph Hellwig8b98c162006-12-06 20:32:30 -0800799static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
Paul Jacksonc61afb12006-03-24 03:16:08 -0800800static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
Christoph Lameterdc85da12006-01-18 17:42:36 -0800801
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700802static struct alien_cache *__alloc_alien_cache(int node, int entries,
803 int batch, gfp_t gfp)
Christoph Lametere498be72005-09-09 13:03:32 -0700804{
Joonsoo Kim5e804782014-08-06 16:04:40 -0700805 size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700806 struct alien_cache *alc = NULL;
807
808 alc = kmalloc_node(memsize, gfp, node);
809 init_arraycache(&alc->ac, entries, batch);
Joonsoo Kim49dfc302014-08-06 16:04:31 -0700810 spin_lock_init(&alc->lock);
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700811 return alc;
812}
813
814static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
815{
816 struct alien_cache **alc_ptr;
Joonsoo Kim5e804782014-08-06 16:04:40 -0700817 size_t memsize = sizeof(void *) * nr_node_ids;
Christoph Lametere498be72005-09-09 13:03:32 -0700818 int i;
819
820 if (limit > 1)
821 limit = 12;
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700822 alc_ptr = kzalloc_node(memsize, gfp, node);
823 if (!alc_ptr)
824 return NULL;
825
826 for_each_node(i) {
827 if (i == node || !node_online(i))
828 continue;
829 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
830 if (!alc_ptr[i]) {
831 for (i--; i >= 0; i--)
832 kfree(alc_ptr[i]);
833 kfree(alc_ptr);
834 return NULL;
Christoph Lametere498be72005-09-09 13:03:32 -0700835 }
836 }
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700837 return alc_ptr;
Christoph Lametere498be72005-09-09 13:03:32 -0700838}
839
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700840static void free_alien_cache(struct alien_cache **alc_ptr)
Christoph Lametere498be72005-09-09 13:03:32 -0700841{
842 int i;
843
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700844 if (!alc_ptr)
Christoph Lametere498be72005-09-09 13:03:32 -0700845 return;
Christoph Lametere498be72005-09-09 13:03:32 -0700846 for_each_node(i)
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700847 kfree(alc_ptr[i]);
848 kfree(alc_ptr);
Christoph Lametere498be72005-09-09 13:03:32 -0700849}
850
Pekka Enberg343e0d72006-02-01 03:05:50 -0800851static void __drain_alien_cache(struct kmem_cache *cachep,
Joonsoo Kim833b7062014-08-06 16:04:33 -0700852 struct array_cache *ac, int node,
853 struct list_head *list)
Christoph Lametere498be72005-09-09 13:03:32 -0700854{
Christoph Lameter18bf8542014-08-06 16:04:11 -0700855 struct kmem_cache_node *n = get_node(cachep, node);
Christoph Lametere498be72005-09-09 13:03:32 -0700856
857 if (ac->avail) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000858 spin_lock(&n->list_lock);
Christoph Lametere00946f2006-03-25 03:06:45 -0800859 /*
860 * Stuff objects into the remote nodes shared array first.
861 * That way we could avoid the overhead of putting the objects
862 * into the free lists and getting them back later.
863 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000864 if (n->shared)
865 transfer_objects(n->shared, ac, ac->limit);
Christoph Lametere00946f2006-03-25 03:06:45 -0800866
Joonsoo Kim833b7062014-08-06 16:04:33 -0700867 free_block(cachep, ac->entry, ac->avail, node, list);
Christoph Lametere498be72005-09-09 13:03:32 -0700868 ac->avail = 0;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000869 spin_unlock(&n->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -0700870 }
871}
872
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800873/*
874 * Called from cache_reap() to regularly drain alien caches round robin.
875 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000876static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800877{
Christoph Lameter909ea962010-12-08 16:22:55 +0100878 int node = __this_cpu_read(slab_reap_node);
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800879
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000880 if (n->alien) {
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700881 struct alien_cache *alc = n->alien[node];
882 struct array_cache *ac;
Christoph Lametere00946f2006-03-25 03:06:45 -0800883
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700884 if (alc) {
885 ac = &alc->ac;
Joonsoo Kim49dfc302014-08-06 16:04:31 -0700886 if (ac->avail && spin_trylock_irq(&alc->lock)) {
Joonsoo Kim833b7062014-08-06 16:04:33 -0700887 LIST_HEAD(list);
888
889 __drain_alien_cache(cachep, ac, node, &list);
Joonsoo Kim49dfc302014-08-06 16:04:31 -0700890 spin_unlock_irq(&alc->lock);
Joonsoo Kim833b7062014-08-06 16:04:33 -0700891 slabs_destroy(cachep, &list);
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700892 }
Christoph Lameter8fce4d82006-03-09 17:33:54 -0800893 }
894 }
895}
896
Andrew Mortona737b3e2006-03-22 00:08:11 -0800897static void drain_alien_cache(struct kmem_cache *cachep,
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700898 struct alien_cache **alien)
Christoph Lametere498be72005-09-09 13:03:32 -0700899{
Pekka Enbergb28a02d2006-01-08 01:00:37 -0800900 int i = 0;
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700901 struct alien_cache *alc;
Christoph Lametere498be72005-09-09 13:03:32 -0700902 struct array_cache *ac;
903 unsigned long flags;
904
905 for_each_online_node(i) {
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700906 alc = alien[i];
907 if (alc) {
Joonsoo Kim833b7062014-08-06 16:04:33 -0700908 LIST_HEAD(list);
909
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700910 ac = &alc->ac;
Joonsoo Kim49dfc302014-08-06 16:04:31 -0700911 spin_lock_irqsave(&alc->lock, flags);
Joonsoo Kim833b7062014-08-06 16:04:33 -0700912 __drain_alien_cache(cachep, ac, i, &list);
Joonsoo Kim49dfc302014-08-06 16:04:31 -0700913 spin_unlock_irqrestore(&alc->lock, flags);
Joonsoo Kim833b7062014-08-06 16:04:33 -0700914 slabs_destroy(cachep, &list);
Christoph Lametere498be72005-09-09 13:03:32 -0700915 }
916 }
917}
Pekka Enberg729bd0b2006-06-23 02:03:05 -0700918
Joonsoo Kim25c4f302014-10-09 15:26:09 -0700919static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
920 int node, int page_node)
Pekka Enberg729bd0b2006-06-23 02:03:05 -0700921{
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000922 struct kmem_cache_node *n;
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700923 struct alien_cache *alien = NULL;
924 struct array_cache *ac;
Joonsoo Kim97654df2014-08-06 16:04:25 -0700925 LIST_HEAD(list);
Pekka Enberg1ca4cb22006-10-06 00:43:52 -0700926
Christoph Lameter18bf8542014-08-06 16:04:11 -0700927 n = get_node(cachep, node);
Pekka Enberg729bd0b2006-06-23 02:03:05 -0700928 STATS_INC_NODEFREES(cachep);
Joonsoo Kim25c4f302014-10-09 15:26:09 -0700929 if (n->alien && n->alien[page_node]) {
930 alien = n->alien[page_node];
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700931 ac = &alien->ac;
Joonsoo Kim49dfc302014-08-06 16:04:31 -0700932 spin_lock(&alien->lock);
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700933 if (unlikely(ac->avail == ac->limit)) {
Pekka Enberg729bd0b2006-06-23 02:03:05 -0700934 STATS_INC_ACOVERFLOW(cachep);
Joonsoo Kim25c4f302014-10-09 15:26:09 -0700935 __drain_alien_cache(cachep, ac, page_node, &list);
Pekka Enberg729bd0b2006-06-23 02:03:05 -0700936 }
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700937 ac_put_obj(cachep, ac, objp);
Joonsoo Kim49dfc302014-08-06 16:04:31 -0700938 spin_unlock(&alien->lock);
Joonsoo Kim833b7062014-08-06 16:04:33 -0700939 slabs_destroy(cachep, &list);
Pekka Enberg729bd0b2006-06-23 02:03:05 -0700940 } else {
Joonsoo Kim25c4f302014-10-09 15:26:09 -0700941 n = get_node(cachep, page_node);
Christoph Lameter18bf8542014-08-06 16:04:11 -0700942 spin_lock(&n->list_lock);
Joonsoo Kim25c4f302014-10-09 15:26:09 -0700943 free_block(cachep, &objp, 1, page_node, &list);
Christoph Lameter18bf8542014-08-06 16:04:11 -0700944 spin_unlock(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -0700945 slabs_destroy(cachep, &list);
Pekka Enberg729bd0b2006-06-23 02:03:05 -0700946 }
947 return 1;
948}
Joonsoo Kim25c4f302014-10-09 15:26:09 -0700949
950static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
951{
952 int page_node = page_to_nid(virt_to_page(objp));
953 int node = numa_mem_id();
954 /*
955 * Make sure we are not freeing a object from another node to the array
956 * cache on this cpu.
957 */
958 if (likely(node == page_node))
959 return 0;
960
961 return __cache_free_alien(cachep, objp, node, page_node);
962}
David Rientjes4167e9b2015-04-14 15:46:55 -0700963
964/*
Mel Gormand0164ad2015-11-06 16:28:21 -0800965 * Construct gfp mask to allocate from a specific node but do not direct reclaim
966 * or warn about failures. kswapd may still wake to reclaim in the background.
David Rientjes4167e9b2015-04-14 15:46:55 -0700967 */
968static inline gfp_t gfp_exact_node(gfp_t flags)
969{
Mel Gormand0164ad2015-11-06 16:28:21 -0800970 return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_DIRECT_RECLAIM;
David Rientjes4167e9b2015-04-14 15:46:55 -0700971}
Christoph Lametere498be72005-09-09 13:03:32 -0700972#endif
973
David Rientjes8f9f8d92010-03-27 19:40:47 -0700974/*
Christoph Lameter6a673682013-01-10 19:14:19 +0000975 * Allocates and initializes node for a node on each slab cache, used for
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000976 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
David Rientjes8f9f8d92010-03-27 19:40:47 -0700977 * will be allocated off-node since memory is not yet online for the new node.
Christoph Lameter6a673682013-01-10 19:14:19 +0000978 * When hotplugging memory or a cpu, existing node are not replaced if
David Rientjes8f9f8d92010-03-27 19:40:47 -0700979 * already in use.
980 *
Christoph Lameter18004c52012-07-06 15:25:12 -0500981 * Must hold slab_mutex.
David Rientjes8f9f8d92010-03-27 19:40:47 -0700982 */
Christoph Lameter6a673682013-01-10 19:14:19 +0000983static int init_cache_node_node(int node)
David Rientjes8f9f8d92010-03-27 19:40:47 -0700984{
985 struct kmem_cache *cachep;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000986 struct kmem_cache_node *n;
Joonsoo Kim5e804782014-08-06 16:04:40 -0700987 const size_t memsize = sizeof(struct kmem_cache_node);
David Rientjes8f9f8d92010-03-27 19:40:47 -0700988
Christoph Lameter18004c52012-07-06 15:25:12 -0500989 list_for_each_entry(cachep, &slab_caches, list) {
David Rientjes8f9f8d92010-03-27 19:40:47 -0700990 /*
Jianyu Zhan5f0985b2014-03-30 17:02:20 +0800991 * Set up the kmem_cache_node for cpu before we can
David Rientjes8f9f8d92010-03-27 19:40:47 -0700992 * begin anything. Make sure some other cpu on this
993 * node has not already allocated this
994 */
Christoph Lameter18bf8542014-08-06 16:04:11 -0700995 n = get_node(cachep, node);
996 if (!n) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000997 n = kmalloc_node(memsize, GFP_KERNEL, node);
998 if (!n)
David Rientjes8f9f8d92010-03-27 19:40:47 -0700999 return -ENOMEM;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001000 kmem_cache_node_init(n);
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08001001 n->next_reap = jiffies + REAPTIMEOUT_NODE +
1002 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
David Rientjes8f9f8d92010-03-27 19:40:47 -07001003
1004 /*
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08001005 * The kmem_cache_nodes don't come and go as CPUs
1006 * come and go. slab_mutex is sufficient
David Rientjes8f9f8d92010-03-27 19:40:47 -07001007 * protection here.
1008 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001009 cachep->node[node] = n;
David Rientjes8f9f8d92010-03-27 19:40:47 -07001010 }
1011
Christoph Lameter18bf8542014-08-06 16:04:11 -07001012 spin_lock_irq(&n->list_lock);
1013 n->free_limit =
David Rientjes8f9f8d92010-03-27 19:40:47 -07001014 (1 + nr_cpus_node(node)) *
1015 cachep->batchcount + cachep->num;
Christoph Lameter18bf8542014-08-06 16:04:11 -07001016 spin_unlock_irq(&n->list_lock);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001017 }
1018 return 0;
1019}
1020
Wanpeng Li0fa81032013-07-04 08:33:22 +08001021static inline int slabs_tofree(struct kmem_cache *cachep,
1022 struct kmem_cache_node *n)
1023{
1024 return (n->free_objects + cachep->num - 1) / cachep->num;
1025}
1026
Paul Gortmaker0db06282013-06-19 14:53:51 -04001027static void cpuup_canceled(long cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028{
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001029 struct kmem_cache *cachep;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001030 struct kmem_cache_node *n = NULL;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07001031 int node = cpu_to_mem(cpu);
Rusty Russella70f7302009-03-13 14:49:46 +10301032 const struct cpumask *mask = cpumask_of_node(node);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001033
Christoph Lameter18004c52012-07-06 15:25:12 -05001034 list_for_each_entry(cachep, &slab_caches, list) {
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001035 struct array_cache *nc;
1036 struct array_cache *shared;
Joonsoo Kimc8522a32014-08-06 16:04:29 -07001037 struct alien_cache **alien;
Joonsoo Kim97654df2014-08-06 16:04:25 -07001038 LIST_HEAD(list);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001039
Christoph Lameter18bf8542014-08-06 16:04:11 -07001040 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001041 if (!n)
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001042 continue;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001043
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001044 spin_lock_irq(&n->list_lock);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001045
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001046 /* Free limit for this kmem_cache_node */
1047 n->free_limit -= cachep->batchcount;
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001048
1049 /* cpu is dead; no one can alloc from it. */
1050 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
1051 if (nc) {
Joonsoo Kim97654df2014-08-06 16:04:25 -07001052 free_block(cachep, nc->entry, nc->avail, node, &list);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001053 nc->avail = 0;
1054 }
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001055
Rusty Russell58463c12009-12-17 11:43:12 -06001056 if (!cpumask_empty(mask)) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001057 spin_unlock_irq(&n->list_lock);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001058 goto free_slab;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001059 }
1060
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001061 shared = n->shared;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001062 if (shared) {
1063 free_block(cachep, shared->entry,
Joonsoo Kim97654df2014-08-06 16:04:25 -07001064 shared->avail, node, &list);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001065 n->shared = NULL;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001066 }
1067
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001068 alien = n->alien;
1069 n->alien = NULL;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001070
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001071 spin_unlock_irq(&n->list_lock);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001072
1073 kfree(shared);
1074 if (alien) {
1075 drain_alien_cache(cachep, alien);
1076 free_alien_cache(alien);
1077 }
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001078
1079free_slab:
Joonsoo Kim97654df2014-08-06 16:04:25 -07001080 slabs_destroy(cachep, &list);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001081 }
1082 /*
1083 * In the previous loop, all the objects were freed to
1084 * the respective cache's slabs, now we can go ahead and
1085 * shrink each nodelist to its limit.
1086 */
Christoph Lameter18004c52012-07-06 15:25:12 -05001087 list_for_each_entry(cachep, &slab_caches, list) {
Christoph Lameter18bf8542014-08-06 16:04:11 -07001088 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001089 if (!n)
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001090 continue;
Wanpeng Li0fa81032013-07-04 08:33:22 +08001091 drain_freelist(cachep, n, slabs_tofree(cachep, n));
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001092 }
1093}
1094
Paul Gortmaker0db06282013-06-19 14:53:51 -04001095static int cpuup_prepare(long cpu)
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001096{
Pekka Enberg343e0d72006-02-01 03:05:50 -08001097 struct kmem_cache *cachep;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001098 struct kmem_cache_node *n = NULL;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07001099 int node = cpu_to_mem(cpu);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001100 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001102 /*
1103 * We need to do this right in the beginning since
1104 * alloc_arraycache's are going to use this list.
1105 * kmalloc_node allows us to add the slab to the right
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001106 * kmem_cache_node and not this cpu's kmem_cache_node
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001107 */
Christoph Lameter6a673682013-01-10 19:14:19 +00001108 err = init_cache_node_node(node);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001109 if (err < 0)
1110 goto bad;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001111
1112 /*
1113 * Now we can go ahead with allocating the shared arrays and
1114 * array caches
1115 */
Christoph Lameter18004c52012-07-06 15:25:12 -05001116 list_for_each_entry(cachep, &slab_caches, list) {
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001117 struct array_cache *shared = NULL;
Joonsoo Kimc8522a32014-08-06 16:04:29 -07001118 struct alien_cache **alien = NULL;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001119
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001120 if (cachep->shared) {
1121 shared = alloc_arraycache(node,
1122 cachep->shared * cachep->batchcount,
Pekka Enberg83b519e2009-06-10 19:40:04 +03001123 0xbaadf00d, GFP_KERNEL);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001124 if (!shared)
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001125 goto bad;
1126 }
1127 if (use_alien_caches) {
Pekka Enberg83b519e2009-06-10 19:40:04 +03001128 alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
Akinobu Mita12d00f62007-10-18 03:05:11 -07001129 if (!alien) {
1130 kfree(shared);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001131 goto bad;
Akinobu Mita12d00f62007-10-18 03:05:11 -07001132 }
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001133 }
Christoph Lameter18bf8542014-08-06 16:04:11 -07001134 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001135 BUG_ON(!n);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001136
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001137 spin_lock_irq(&n->list_lock);
1138 if (!n->shared) {
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001139 /*
1140 * We are serialised from CPU_DEAD or
1141 * CPU_UP_CANCELLED by the cpucontrol lock
1142 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001143 n->shared = shared;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001144 shared = NULL;
1145 }
1146#ifdef CONFIG_NUMA
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001147 if (!n->alien) {
1148 n->alien = alien;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001149 alien = NULL;
1150 }
1151#endif
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001152 spin_unlock_irq(&n->list_lock);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001153 kfree(shared);
1154 free_alien_cache(alien);
1155 }
Pekka Enbergce79ddc2009-11-23 22:01:15 +02001156
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001157 return 0;
1158bad:
Akinobu Mita12d00f62007-10-18 03:05:11 -07001159 cpuup_canceled(cpu);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001160 return -ENOMEM;
1161}
1162
Paul Gortmaker0db06282013-06-19 14:53:51 -04001163static int cpuup_callback(struct notifier_block *nfb,
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001164 unsigned long action, void *hcpu)
1165{
1166 long cpu = (long)hcpu;
1167 int err = 0;
1168
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 switch (action) {
Heiko Carstens38c3bd92007-05-09 02:34:05 -07001170 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001171 case CPU_UP_PREPARE_FROZEN:
Christoph Lameter18004c52012-07-06 15:25:12 -05001172 mutex_lock(&slab_mutex);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001173 err = cpuup_prepare(cpu);
Christoph Lameter18004c52012-07-06 15:25:12 -05001174 mutex_unlock(&slab_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 break;
1176 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001177 case CPU_ONLINE_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 start_cpu_timer(cpu);
1179 break;
1180#ifdef CONFIG_HOTPLUG_CPU
Christoph Lameter5830c592007-05-09 02:34:22 -07001181 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001182 case CPU_DOWN_PREPARE_FROZEN:
Christoph Lameter5830c592007-05-09 02:34:22 -07001183 /*
Christoph Lameter18004c52012-07-06 15:25:12 -05001184		 * Shut down the cache reaper. Note that the slab_mutex is
Christoph Lameter5830c592007-05-09 02:34:22 -07001185 * held so that if cache_reap() is invoked it cannot do
1186 * anything expensive but will only modify reap_work
1187 * and reschedule the timer.
1188 */
Tejun Heoafe2c512010-12-14 16:21:17 +01001189 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
Christoph Lameter5830c592007-05-09 02:34:22 -07001190 /* Now the cache_reaper is guaranteed to be not running. */
Tejun Heo1871e522009-10-29 22:34:13 +09001191 per_cpu(slab_reap_work, cpu).work.func = NULL;
Christoph Lameter5830c592007-05-09 02:34:22 -07001192 break;
1193 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001194 case CPU_DOWN_FAILED_FROZEN:
Christoph Lameter5830c592007-05-09 02:34:22 -07001195 start_cpu_timer(cpu);
1196 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001198 case CPU_DEAD_FROZEN:
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001199 /*
1200 * Even if all the cpus of a node are down, we don't free the
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001201		 * kmem_cache_node of any cache. This is to avoid a race between
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001202		 * cpu_down and a kmalloc allocation from another cpu for
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001203 * memory from the node of the cpu going down. The node
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001204 * structure is usually allocated from kmem_cache_create() and
1205 * gets destroyed at kmem_cache_destroy().
1206 */
Simon Arlott183ff222007-10-20 01:27:18 +02001207 /* fall through */
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08001208#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001210 case CPU_UP_CANCELED_FROZEN:
Christoph Lameter18004c52012-07-06 15:25:12 -05001211 mutex_lock(&slab_mutex);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001212 cpuup_canceled(cpu);
Christoph Lameter18004c52012-07-06 15:25:12 -05001213 mutex_unlock(&slab_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 }
Akinobu Mitaeac40682010-05-26 14:43:32 -07001216 return notifier_from_errno(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217}
1218
Paul Gortmaker0db06282013-06-19 14:53:51 -04001219static struct notifier_block cpucache_notifier = {
Chandra Seetharaman74b85f32006-06-27 02:54:09 -07001220 &cpuup_callback, NULL, 0
1221};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
David Rientjes8f9f8d92010-03-27 19:40:47 -07001223#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1224/*
1225 * Drains freelist for a node on each slab cache, used for memory hot-remove.
	1226 * Returns -EBUSY if not all objects can be drained, so that the node is
	1227 * not removed.
1228 *
Christoph Lameter18004c52012-07-06 15:25:12 -05001229 * Must hold slab_mutex.
David Rientjes8f9f8d92010-03-27 19:40:47 -07001230 */
Christoph Lameter6a673682013-01-10 19:14:19 +00001231static int __meminit drain_cache_node_node(int node)
David Rientjes8f9f8d92010-03-27 19:40:47 -07001232{
1233 struct kmem_cache *cachep;
1234 int ret = 0;
1235
Christoph Lameter18004c52012-07-06 15:25:12 -05001236 list_for_each_entry(cachep, &slab_caches, list) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001237 struct kmem_cache_node *n;
David Rientjes8f9f8d92010-03-27 19:40:47 -07001238
Christoph Lameter18bf8542014-08-06 16:04:11 -07001239 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001240 if (!n)
David Rientjes8f9f8d92010-03-27 19:40:47 -07001241 continue;
1242
Wanpeng Li0fa81032013-07-04 08:33:22 +08001243 drain_freelist(cachep, n, slabs_tofree(cachep, n));
David Rientjes8f9f8d92010-03-27 19:40:47 -07001244
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001245 if (!list_empty(&n->slabs_full) ||
1246 !list_empty(&n->slabs_partial)) {
David Rientjes8f9f8d92010-03-27 19:40:47 -07001247 ret = -EBUSY;
1248 break;
1249 }
1250 }
1251 return ret;
1252}
1253
1254static int __meminit slab_memory_callback(struct notifier_block *self,
1255 unsigned long action, void *arg)
1256{
1257 struct memory_notify *mnb = arg;
1258 int ret = 0;
1259 int nid;
1260
1261 nid = mnb->status_change_nid;
1262 if (nid < 0)
1263 goto out;
1264
1265 switch (action) {
1266 case MEM_GOING_ONLINE:
Christoph Lameter18004c52012-07-06 15:25:12 -05001267 mutex_lock(&slab_mutex);
Christoph Lameter6a673682013-01-10 19:14:19 +00001268 ret = init_cache_node_node(nid);
Christoph Lameter18004c52012-07-06 15:25:12 -05001269 mutex_unlock(&slab_mutex);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001270 break;
1271 case MEM_GOING_OFFLINE:
Christoph Lameter18004c52012-07-06 15:25:12 -05001272 mutex_lock(&slab_mutex);
Christoph Lameter6a673682013-01-10 19:14:19 +00001273 ret = drain_cache_node_node(nid);
Christoph Lameter18004c52012-07-06 15:25:12 -05001274 mutex_unlock(&slab_mutex);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001275 break;
1276 case MEM_ONLINE:
1277 case MEM_OFFLINE:
1278 case MEM_CANCEL_ONLINE:
1279 case MEM_CANCEL_OFFLINE:
1280 break;
1281 }
1282out:
Prarit Bhargava5fda1bd2011-03-22 16:30:49 -07001283 return notifier_from_errno(ret);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001284}
1285#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1286
Christoph Lametere498be72005-09-09 13:03:32 -07001287/*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001288 * swap the static kmem_cache_node with kmalloced memory
Christoph Lametere498be72005-09-09 13:03:32 -07001289 */
Christoph Lameter6744f082013-01-10 19:12:17 +00001290static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
David Rientjes8f9f8d92010-03-27 19:40:47 -07001291 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07001292{
Christoph Lameter6744f082013-01-10 19:12:17 +00001293 struct kmem_cache_node *ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001294
Christoph Lameter6744f082013-01-10 19:12:17 +00001295 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
Christoph Lametere498be72005-09-09 13:03:32 -07001296 BUG_ON(!ptr);
1297
Christoph Lameter6744f082013-01-10 19:12:17 +00001298 memcpy(ptr, list, sizeof(struct kmem_cache_node));
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001299 /*
1300 * Do not assume that spinlocks can be initialized via memcpy:
1301 */
1302 spin_lock_init(&ptr->list_lock);
1303
Christoph Lametere498be72005-09-09 13:03:32 -07001304 MAKE_ALL_LISTS(cachep, ptr, nodeid);
Christoph Lameter6a673682013-01-10 19:14:19 +00001305 cachep->node[nodeid] = ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001306}
1307
Andrew Mortona737b3e2006-03-22 00:08:11 -08001308/*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001309 * For setting up all the kmem_cache_node structures for caches whose
	1310 * buffer_size is the same as the size of kmem_cache_node.
Pekka Enberg556a1692008-01-25 08:20:51 +02001311 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001312static void __init set_up_node(struct kmem_cache *cachep, int index)
Pekka Enberg556a1692008-01-25 08:20:51 +02001313{
1314 int node;
1315
1316 for_each_online_node(node) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001317 cachep->node[node] = &init_kmem_cache_node[index + node];
Christoph Lameter6a673682013-01-10 19:14:19 +00001318 cachep->node[node]->next_reap = jiffies +
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08001319 REAPTIMEOUT_NODE +
1320 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
Pekka Enberg556a1692008-01-25 08:20:51 +02001321 }
1322}
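/*
 * Worked example (illustrative note, not part of the original file; the
 * exact REAPTIMEOUT_NODE value is an assumption): the per-cache offset
 * ((unsigned long)cachep) % REAPTIMEOUT_NODE above staggers next_reap so
 * that the caches on a node are not all reaped in the same timer tick.
 * If REAPTIMEOUT_NODE were 4*HZ, each cache's first reap deadline would
 * land between 4 and 8 seconds from now, spread out by its pointer value.
 */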
1323
1324/*
Andrew Mortona737b3e2006-03-22 00:08:11 -08001325 * Initialisation. Called after the page allocator has been initialised and
1326 * before smp_init().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 */
1328void __init kmem_cache_init(void)
1329{
Christoph Lametere498be72005-09-09 13:03:32 -07001330 int i;
1331
Joonsoo Kim68126702013-10-24 10:07:42 +09001332 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1333 sizeof(struct rcu_head));
Christoph Lameter9b030cb2012-09-05 00:20:33 +00001334 kmem_cache = &kmem_cache_boot;
1335
Mel Gormanb6e68bc2009-06-16 15:32:16 -07001336 if (num_possible_nodes() == 1)
Siddha, Suresh B62918a02007-05-02 19:27:18 +02001337 use_alien_caches = 0;
1338
Christoph Lameter3c583462012-11-28 16:23:01 +00001339 for (i = 0; i < NUM_INIT_LISTS; i++)
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001340 kmem_cache_node_init(&init_kmem_cache_node[i]);
Christoph Lameter3c583462012-11-28 16:23:01 +00001341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 /*
1343 * Fragmentation resistance on low memory - only use bigger
David Rientjes3df1ccc2011-10-18 22:09:28 -07001344 * page orders on machines with more than 32MB of memory if
1345 * not overridden on the command line.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 */
David Rientjes3df1ccc2011-10-18 22:09:28 -07001347 if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
David Rientjes543585c2011-10-18 22:09:24 -07001348 slab_max_order = SLAB_MAX_ORDER_HI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 /* Bootstrap is tricky, because several objects are allocated
1351 * from caches that do not exist yet:
Christoph Lameter9b030cb2012-09-05 00:20:33 +00001352 * 1) initialize the kmem_cache cache: it contains the struct
1353 * kmem_cache structures of all caches, except kmem_cache itself:
1354 * kmem_cache is statically allocated.
Christoph Lametere498be72005-09-09 13:03:32 -07001355 * Initially an __init data area is used for the head array and the
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001356	 * kmem_cache_node structures; it's replaced with a kmalloc allocated
Christoph Lametere498be72005-09-09 13:03:32 -07001357 * array at the end of the bootstrap.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 * 2) Create the first kmalloc cache.
Pekka Enberg343e0d72006-02-01 03:05:50 -08001359 * The struct kmem_cache for the new cache is allocated normally.
Christoph Lametere498be72005-09-09 13:03:32 -07001360 * An __init data area is used for the head array.
1361 * 3) Create the remaining kmalloc caches, with minimally sized
1362 * head arrays.
Christoph Lameter9b030cb2012-09-05 00:20:33 +00001363 * 4) Replace the __init data head arrays for kmem_cache and the first
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 * kmalloc cache with kmalloc allocated arrays.
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001365 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
Christoph Lametere498be72005-09-09 13:03:32 -07001366	 * the other caches with kmalloc allocated memory.
1367 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 */
1369
Christoph Lameter9b030cb2012-09-05 00:20:33 +00001370 /* 1) create the kmem_cache */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Eric Dumazet8da34302007-05-06 14:49:29 -07001372 /*
Eric Dumazetb56efcf2011-07-20 19:04:23 +02001373 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
Eric Dumazet8da34302007-05-06 14:49:29 -07001374 */
Christoph Lameter2f9baa92012-11-28 16:23:09 +00001375 create_boot_cache(kmem_cache, "kmem_cache",
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001376 offsetof(struct kmem_cache, node) +
Christoph Lameter6744f082013-01-10 19:12:17 +00001377 nr_node_ids * sizeof(struct kmem_cache_node *),
Christoph Lameter2f9baa92012-11-28 16:23:09 +00001378 SLAB_HWCACHE_ALIGN);
1379 list_add(&kmem_cache->list, &slab_caches);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001380 slab_state = PARTIAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Andrew Mortona737b3e2006-03-22 00:08:11 -08001382 /*
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001383 * Initialize the caches that provide memory for the kmem_cache_node
1384 * structures first. Without this, further allocations will bug.
Christoph Lametere498be72005-09-09 13:03:32 -07001385 */
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001386 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001387 kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001388 slab_state = PARTIAL_NODE;
Daniel Sanders34cc6992015-06-24 16:55:57 -07001389 setup_kmalloc_cache_index_table();
Christoph Lametere498be72005-09-09 13:03:32 -07001390
Ingo Molnare0a42722006-06-23 02:03:46 -07001391 slab_early_init = 0;
1392
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001393 /* 5) Replace the bootstrap kmem_cache_node */
Christoph Lametere498be72005-09-09 13:03:32 -07001394 {
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001395 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
Mel Gorman9c09a952008-01-24 05:49:54 -08001397 for_each_online_node(nid) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001398 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
Pekka Enberg556a1692008-01-25 08:20:51 +02001399
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001400 init_list(kmalloc_caches[INDEX_NODE],
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001401 &init_kmem_cache_node[SIZE_NODE + nid], nid);
Christoph Lametere498be72005-09-09 13:03:32 -07001402 }
1403 }
1404
Christoph Lameterf97d5f62013-01-10 19:12:17 +00001405 create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
Pekka Enberg8429db52009-06-12 15:58:59 +03001406}
Ravikiran G Thirumalai056c6242006-09-25 23:31:38 -07001407
Pekka Enberg8429db52009-06-12 15:58:59 +03001408void __init kmem_cache_init_late(void)
1409{
1410 struct kmem_cache *cachep;
1411
Christoph Lameter97d06602012-07-06 15:25:11 -05001412 slab_state = UP;
Peter Zijlstra52cef182011-11-28 21:12:40 +01001413
Pekka Enberg8429db52009-06-12 15:58:59 +03001414 /* 6) resize the head arrays to their final sizes */
Christoph Lameter18004c52012-07-06 15:25:12 -05001415 mutex_lock(&slab_mutex);
1416 list_for_each_entry(cachep, &slab_caches, list)
Pekka Enberg8429db52009-06-12 15:58:59 +03001417 if (enable_cpucache(cachep, GFP_NOWAIT))
1418 BUG();
Christoph Lameter18004c52012-07-06 15:25:12 -05001419 mutex_unlock(&slab_mutex);
Ravikiran G Thirumalai056c6242006-09-25 23:31:38 -07001420
Christoph Lameter97d06602012-07-06 15:25:11 -05001421 /* Done! */
1422 slab_state = FULL;
1423
Andrew Mortona737b3e2006-03-22 00:08:11 -08001424 /*
1425 * Register a cpu startup notifier callback that initializes
1426 * cpu_cache_get for all new cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 */
1428 register_cpu_notifier(&cpucache_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
David Rientjes8f9f8d92010-03-27 19:40:47 -07001430#ifdef CONFIG_NUMA
1431 /*
1432 * Register a memory hotplug callback that initializes and frees
Christoph Lameter6a673682013-01-10 19:14:19 +00001433 * node.
David Rientjes8f9f8d92010-03-27 19:40:47 -07001434 */
1435 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1436#endif
1437
Andrew Mortona737b3e2006-03-22 00:08:11 -08001438 /*
1439 * The reap timers are started later, with a module init call: That part
1440 * of the kernel is not yet operational.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 */
1442}
1443
1444static int __init cpucache_init(void)
1445{
1446 int cpu;
1447
Andrew Mortona737b3e2006-03-22 00:08:11 -08001448 /*
1449 * Register the timers that return unneeded pages to the page allocator
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 */
Christoph Lametere498be72005-09-09 13:03:32 -07001451 for_each_online_cpu(cpu)
Andrew Mortona737b3e2006-03-22 00:08:11 -08001452 start_cpu_timer(cpu);
Glauber Costaa164f8962012-06-21 00:59:18 +04001453
1454 /* Done! */
Christoph Lameter97d06602012-07-06 15:25:11 -05001455 slab_state = FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 return 0;
1457}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458__initcall(cpucache_init);
1459
Rafael Aquini8bdec192012-03-09 17:27:27 -03001460static noinline void
1461slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1462{
David Rientjes9a02d692014-06-04 16:06:36 -07001463#if DEBUG
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001464 struct kmem_cache_node *n;
Joonsoo Kim8456a642013-10-24 10:07:49 +09001465 struct page *page;
Rafael Aquini8bdec192012-03-09 17:27:27 -03001466 unsigned long flags;
1467 int node;
David Rientjes9a02d692014-06-04 16:06:36 -07001468 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1469 DEFAULT_RATELIMIT_BURST);
1470
1471 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1472 return;
Rafael Aquini8bdec192012-03-09 17:27:27 -03001473
1474 printk(KERN_WARNING
1475 "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1476 nodeid, gfpflags);
1477 printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n",
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05001478 cachep->name, cachep->size, cachep->gfporder);
Rafael Aquini8bdec192012-03-09 17:27:27 -03001479
Christoph Lameter18bf8542014-08-06 16:04:11 -07001480 for_each_kmem_cache_node(cachep, node, n) {
Rafael Aquini8bdec192012-03-09 17:27:27 -03001481 unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1482 unsigned long active_slabs = 0, num_slabs = 0;
1483
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001484 spin_lock_irqsave(&n->list_lock, flags);
Joonsoo Kim8456a642013-10-24 10:07:49 +09001485 list_for_each_entry(page, &n->slabs_full, lru) {
Rafael Aquini8bdec192012-03-09 17:27:27 -03001486 active_objs += cachep->num;
1487 active_slabs++;
1488 }
Joonsoo Kim8456a642013-10-24 10:07:49 +09001489 list_for_each_entry(page, &n->slabs_partial, lru) {
1490 active_objs += page->active;
Rafael Aquini8bdec192012-03-09 17:27:27 -03001491 active_slabs++;
1492 }
Joonsoo Kim8456a642013-10-24 10:07:49 +09001493 list_for_each_entry(page, &n->slabs_free, lru)
Rafael Aquini8bdec192012-03-09 17:27:27 -03001494 num_slabs++;
1495
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001496 free_objects += n->free_objects;
1497 spin_unlock_irqrestore(&n->list_lock, flags);
Rafael Aquini8bdec192012-03-09 17:27:27 -03001498
1499 num_slabs += active_slabs;
1500 num_objs = num_slabs * cachep->num;
1501 printk(KERN_WARNING
1502 " node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1503 node, active_slabs, num_slabs, active_objs, num_objs,
1504 free_objects);
1505 }
David Rientjes9a02d692014-06-04 16:06:36 -07001506#endif
Rafael Aquini8bdec192012-03-09 17:27:27 -03001507}
1508
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509/*
Wang Sheng-Hui8a7d9b42014-08-06 16:04:46 -07001510 * Interface to system's page allocator. No need to hold the
1511 * kmem_cache_node ->list_lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 *
1513 * If we requested dmaable memory, we will get it. Even if we
1514 * did not request dmaable memory, we might get it, but that
1515 * would be relatively rare and ignorable.
1516 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09001517static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1518 int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519{
1520 struct page *page;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001521 int nr_pages;
Christoph Lameter765c4502006-09-27 01:50:08 -07001522
Glauber Costaa618e892012-06-14 16:17:21 +04001523 flags |= cachep->allocflags;
Mel Gormane12ba742007-10-16 01:25:52 -07001524 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1525 flags |= __GFP_RECLAIMABLE;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001526
Vlastimil Babka96db8002015-09-08 15:03:50 -07001527 page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
Rafael Aquini8bdec192012-03-09 17:27:27 -03001528 if (!page) {
David Rientjes9a02d692014-06-04 16:06:36 -07001529 slab_out_of_memory(cachep, flags, nodeid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 return NULL;
Rafael Aquini8bdec192012-03-09 17:27:27 -03001531 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08001533 if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
1534 __free_pages(page, cachep->gfporder);
1535 return NULL;
1536 }
1537
Mel Gormanb37f1dd2012-07-31 16:44:03 -07001538 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
Michal Hocko2f064f32015-08-21 14:11:51 -07001539 if (page_is_pfmemalloc(page))
Mel Gorman072bb0a2012-07-31 16:43:58 -07001540 pfmemalloc_active = true;
1541
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001542 nr_pages = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
Christoph Lameter972d1a72006-09-25 23:31:51 -07001544 add_zone_page_state(page_zone(page),
1545 NR_SLAB_RECLAIMABLE, nr_pages);
1546 else
1547 add_zone_page_state(page_zone(page),
1548 NR_SLAB_UNRECLAIMABLE, nr_pages);
Joonsoo Kima57a4982013-10-24 10:07:44 +09001549 __SetPageSlab(page);
Michal Hocko2f064f32015-08-21 14:11:51 -07001550 if (page_is_pfmemalloc(page))
Joonsoo Kima57a4982013-10-24 10:07:44 +09001551 SetPageSlabPfmemalloc(page);
Mel Gorman072bb0a2012-07-31 16:43:58 -07001552
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001553 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1554 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1555
1556 if (cachep->ctor)
1557 kmemcheck_mark_uninitialized_pages(page, nr_pages);
1558 else
1559 kmemcheck_mark_unallocated_pages(page, nr_pages);
1560 }
Pekka Enbergc175eea2008-05-09 20:35:53 +02001561
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09001562 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563}
1564
1565/*
1566 * Interface to system's page release.
1567 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09001568static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569{
Joonsoo Kima57a4982013-10-24 10:07:44 +09001570 const unsigned long nr_freed = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001572 kmemcheck_free_shadow(page, cachep->gfporder);
Pekka Enbergc175eea2008-05-09 20:35:53 +02001573
Christoph Lameter972d1a72006-09-25 23:31:51 -07001574 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1575 sub_zone_page_state(page_zone(page),
1576 NR_SLAB_RECLAIMABLE, nr_freed);
1577 else
1578 sub_zone_page_state(page_zone(page),
1579 NR_SLAB_UNRECLAIMABLE, nr_freed);
Joonsoo Kim73293c22013-10-24 10:07:37 +09001580
Joonsoo Kima57a4982013-10-24 10:07:44 +09001581 BUG_ON(!PageSlab(page));
Joonsoo Kim73293c22013-10-24 10:07:37 +09001582 __ClearPageSlabPfmemalloc(page);
Joonsoo Kima57a4982013-10-24 10:07:44 +09001583 __ClearPageSlab(page);
Joonsoo Kim8456a642013-10-24 10:07:49 +09001584 page_mapcount_reset(page);
1585 page->mapping = NULL;
Glauber Costa1f458cb2012-12-18 14:22:50 -08001586
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 if (current->reclaim_state)
1588 current->reclaim_state->reclaimed_slab += nr_freed;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08001589 __free_kmem_pages(page, cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590}
1591
1592static void kmem_rcu_free(struct rcu_head *head)
1593{
Joonsoo Kim68126702013-10-24 10:07:42 +09001594 struct kmem_cache *cachep;
1595 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596
Joonsoo Kim68126702013-10-24 10:07:42 +09001597 page = container_of(head, struct page, rcu_head);
1598 cachep = page->slab_cache;
1599
1600 kmem_freepages(cachep, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601}
1602
1603#if DEBUG
Joonsoo Kim40b44132016-03-15 14:54:21 -07001604static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1605{
1606 if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1607 (cachep->size % PAGE_SIZE) == 0)
1608 return true;
1609
1610 return false;
1611}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612
1613#ifdef CONFIG_DEBUG_PAGEALLOC
Pekka Enberg343e0d72006-02-01 03:05:50 -08001614static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001615 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616{
Christoph Lameter8c138bc2012-06-13 10:24:58 -05001617 int size = cachep->object_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001619 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001621 if (size < 5 * sizeof(unsigned long))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 return;
1623
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001624 *addr++ = 0x12345678;
1625 *addr++ = caller;
1626 *addr++ = smp_processor_id();
1627 size -= 3 * sizeof(unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 {
1629 unsigned long *sptr = &caller;
1630 unsigned long svalue;
1631
1632 while (!kstack_end(sptr)) {
1633 svalue = *sptr++;
1634 if (kernel_text_address(svalue)) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001635 *addr++ = svalue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636 size -= sizeof(unsigned long);
1637 if (size <= sizeof(unsigned long))
1638 break;
1639 }
1640 }
1641
1642 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001643 *addr++ = 0x87654321;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644}
Joonsoo Kim40b44132016-03-15 14:54:21 -07001645
1646static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1647 int map, unsigned long caller)
1648{
1649 if (!is_debug_pagealloc_cache(cachep))
1650 return;
1651
1652 if (caller)
1653 store_stackinfo(cachep, objp, caller);
1654
1655 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1656}
1657
1658#else
1659static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1660 int map, unsigned long caller) {}
1661
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662#endif
1663
Pekka Enberg343e0d72006-02-01 03:05:50 -08001664static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665{
Christoph Lameter8c138bc2012-06-13 10:24:58 -05001666 int size = cachep->object_size;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001667 addr = &((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668
1669 memset(addr, val, size);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001670 *(unsigned char *)(addr + size - 1) = POISON_END;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671}
1672
1673static void dump_line(char *data, int offset, int limit)
1674{
1675 int i;
Dave Jonesaa83aa42006-09-29 01:59:51 -07001676 unsigned char error = 0;
1677 int bad_count = 0;
1678
Sebastian Andrzej Siewiorfdde6ab2011-07-29 18:22:13 +02001679 printk(KERN_ERR "%03x: ", offset);
Dave Jonesaa83aa42006-09-29 01:59:51 -07001680 for (i = 0; i < limit; i++) {
1681 if (data[offset + i] != POISON_FREE) {
1682 error = data[offset + i];
1683 bad_count++;
1684 }
Dave Jonesaa83aa42006-09-29 01:59:51 -07001685 }
Sebastian Andrzej Siewiorfdde6ab2011-07-29 18:22:13 +02001686 print_hex_dump(KERN_CONT, "", 0, 16, 1,
1687 &data[offset], limit, 1);
Dave Jonesaa83aa42006-09-29 01:59:51 -07001688
1689 if (bad_count == 1) {
1690 error ^= POISON_FREE;
1691 if (!(error & (error - 1))) {
1692 printk(KERN_ERR "Single bit error detected. Probably "
1693 "bad RAM.\n");
1694#ifdef CONFIG_X86
1695 printk(KERN_ERR "Run memtest86+ or a similar memory "
1696 "test tool.\n");
1697#else
1698 printk(KERN_ERR "Run a memory test tool.\n");
1699#endif
1700 }
1701 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702}
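/*
 * Worked example (illustrative note, not part of the original file;
 * POISON_FREE == 0x6b is an assumption taken from include/linux/poison.h):
 * when exactly one byte of a poisoned object differs, the code above XORs
 * it with POISON_FREE; a result with a single bit set -- a power of two,
 * which is what !(error & (error - 1)) tests -- means exactly one bit
 * flipped, e.g. 0x6a ^ 0x6b == 0x01, the classic bad-RAM signature the
 * message refers to.
 */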
1703#endif
1704
1705#if DEBUG
1706
Pekka Enberg343e0d72006-02-01 03:05:50 -08001707static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708{
1709 int i, size;
1710 char *realobj;
1711
1712 if (cachep->flags & SLAB_RED_ZONE) {
David Woodhouseb46b8f12007-05-08 00:22:59 -07001713 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001714 *dbg_redzone1(cachep, objp),
1715 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 }
1717
1718 if (cachep->flags & SLAB_STORE_USER) {
Joe Perches071361d2012-12-12 10:19:12 -08001719 printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
1720 *dbg_userword(cachep, objp),
1721 *dbg_userword(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 }
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001723 realobj = (char *)objp + obj_offset(cachep);
Christoph Lameter8c138bc2012-06-13 10:24:58 -05001724 size = cachep->object_size;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001725 for (i = 0; i < size && lines; i += 16, lines--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 int limit;
1727 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001728 if (i + limit > size)
1729 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 dump_line(realobj, i, limit);
1731 }
1732}
1733
Pekka Enberg343e0d72006-02-01 03:05:50 -08001734static void check_poison_obj(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735{
1736 char *realobj;
1737 int size, i;
1738 int lines = 0;
1739
Joonsoo Kim40b44132016-03-15 14:54:21 -07001740 if (is_debug_pagealloc_cache(cachep))
1741 return;
1742
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001743 realobj = (char *)objp + obj_offset(cachep);
Christoph Lameter8c138bc2012-06-13 10:24:58 -05001744 size = cachep->object_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001746 for (i = 0; i < size; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 char exp = POISON_FREE;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001748 if (i == size - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 exp = POISON_END;
1750 if (realobj[i] != exp) {
1751 int limit;
1752 /* Mismatch ! */
1753 /* Print header */
1754 if (lines == 0) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001755 printk(KERN_ERR
Dave Jonesface37f2011-11-15 15:03:52 -08001756 "Slab corruption (%s): %s start=%p, len=%d\n",
1757 print_tainted(), cachep->name, realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 print_objinfo(cachep, objp, 0);
1759 }
1760 /* Hexdump the affected line */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001761 i = (i / 16) * 16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001763 if (i + limit > size)
1764 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 dump_line(realobj, i, limit);
1766 i += 16;
1767 lines++;
1768 /* Limit to 5 lines */
1769 if (lines > 5)
1770 break;
1771 }
1772 }
1773 if (lines != 0) {
1774 /* Print some data about the neighboring objects, if they
1775 * exist:
1776 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09001777 struct page *page = virt_to_head_page(objp);
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001778 unsigned int objnr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
Joonsoo Kim8456a642013-10-24 10:07:49 +09001780 objnr = obj_to_index(cachep, page, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 if (objnr) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09001782 objp = index_to_obj(cachep, page, objnr - 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001783 realobj = (char *)objp + obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001785 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 print_objinfo(cachep, objp, 2);
1787 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001788 if (objnr + 1 < cachep->num) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09001789 objp = index_to_obj(cachep, page, objnr + 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001790 realobj = (char *)objp + obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001792 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 print_objinfo(cachep, objp, 2);
1794 }
1795 }
1796}
1797#endif
1798
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799#if DEBUG
Joonsoo Kim8456a642013-10-24 10:07:49 +09001800static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1801 struct page *page)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001802{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 int i;
1804 for (i = 0; i < cachep->num; i++) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09001805 void *objp = index_to_obj(cachep, page, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
1807 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 check_poison_obj(cachep, objp);
Joonsoo Kim40b44132016-03-15 14:54:21 -07001809 slab_kernel_map(cachep, objp, 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 }
1811 if (cachep->flags & SLAB_RED_ZONE) {
1812 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1813 slab_error(cachep, "start of a freed object "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001814 "was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1816 slab_error(cachep, "end of a freed object "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001817 "was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 }
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001820}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821#else
Joonsoo Kim8456a642013-10-24 10:07:49 +09001822static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1823 struct page *page)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001824{
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001825}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826#endif
1827
Randy Dunlap911851e2006-03-22 00:08:14 -08001828/**
1829 * slab_destroy - destroy and release all objects in a slab
1830 * @cachep: cache pointer being destroyed
Masanari Iidacb8ee1a2014-01-28 02:57:08 +09001831 * @page: page pointer being destroyed
Randy Dunlap911851e2006-03-22 00:08:14 -08001832 *
Wang Sheng-Hui8a7d9b42014-08-06 16:04:46 -07001833 * Destroy all the objs in a slab page, and release the mem back to the system.
	1834 * Before calling, the slab page must have been unlinked from the cache. The
1835 * kmem_cache_node ->list_lock is not held/needed.
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001836 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09001837static void slab_destroy(struct kmem_cache *cachep, struct page *page)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001838{
Joonsoo Kim7e007352013-10-30 19:04:01 +09001839 void *freelist;
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001840
Joonsoo Kim8456a642013-10-24 10:07:49 +09001841 freelist = page->freelist;
1842 slab_destroy_debugcheck(cachep, page);
Kirill A. Shutemovbc4f6102015-11-06 16:29:44 -08001843 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
1844 call_rcu(&page->rcu_head, kmem_rcu_free);
1845 else
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09001846 kmem_freepages(cachep, page);
Joonsoo Kim68126702013-10-24 10:07:42 +09001847
1848 /*
Joonsoo Kim8456a642013-10-24 10:07:49 +09001849	 * From now on, we don't use the freelist,
Joonsoo Kim68126702013-10-24 10:07:42 +09001850	 * although the actual page can be freed in RCU context
1851 */
1852 if (OFF_SLAB(cachep))
Joonsoo Kim8456a642013-10-24 10:07:49 +09001853 kmem_cache_free(cachep->freelist_cache, freelist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854}
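/*
 * Illustrative note (an assumption about typical SLAB_DESTROY_BY_RCU usage,
 * not taken from this file): only the backing page is RCU-deferred above;
 * objects may be reused immediately within the same cache. A lockless
 * reader that finds an object under rcu_read_lock() therefore has to take
 * a reference and then re-check the object's identity (e.g. a key field),
 * since the memory may already belong to a different object of the same type.
 */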
1855
Joonsoo Kim97654df2014-08-06 16:04:25 -07001856static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1857{
1858 struct page *page, *n;
1859
1860 list_for_each_entry_safe(page, n, list, lru) {
1861 list_del(&page->lru);
1862 slab_destroy(cachep, page);
1863 }
1864}
1865
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866/**
Randy.Dunlapa70773d2006-02-01 03:05:52 -08001867 * calculate_slab_order - calculate size (page order) of slabs
1868 * @cachep: pointer to the cache that is being created
1869 * @size: size of objects to be created in this cache.
Randy.Dunlapa70773d2006-02-01 03:05:52 -08001870 * @flags: slab allocation flags
1871 *
1872 * Also calculates the number of objects per slab.
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001873 *
1874 * This could be made much more intelligent. For now, try to avoid using
1875 * high order pages for slabs. When the gfp() functions are more friendly
1876 * towards high-order requests, this should be changed.
1877 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001878static size_t calculate_slab_order(struct kmem_cache *cachep,
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07001879 size_t size, unsigned long flags)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001880{
1881 size_t left_over = 0;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001882 int gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001883
Christoph Lameter0aa817f2007-05-16 22:11:01 -07001884 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001885 unsigned int num;
1886 size_t remainder;
1887
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07001888 cache_estimate(gfporder, size, flags, &remainder, &num);
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001889 if (!num)
1890 continue;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001891
Joonsoo Kimf315e3f2013-12-02 17:49:41 +09001892 /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
1893 if (num > SLAB_OBJ_MAX_NUM)
1894 break;
1895
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001896 if (flags & CFLGS_OFF_SLAB) {
Joonsoo Kim3217fd92016-03-15 14:54:41 -07001897 struct kmem_cache *freelist_cache;
1898 size_t freelist_size;
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001899
Joonsoo Kim3217fd92016-03-15 14:54:41 -07001900 freelist_size = num * sizeof(freelist_idx_t);
1901 freelist_cache = kmalloc_slab(freelist_size, 0u);
1902 if (!freelist_cache)
1903 continue;
1904
1905 /*
1906 * Needed to avoid possible looping condition
1907 * in cache_grow()
1908 */
1909 if (OFF_SLAB(freelist_cache))
1910 continue;
1911
1912 /* check if off slab has enough benefit */
1913 if (freelist_cache->size > cachep->size / 2)
1914 continue;
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001915 }
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001916
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001917 /* Found something acceptable - save it away */
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001918 cachep->num = num;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001919 cachep->gfporder = gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001920 left_over = remainder;
1921
1922 /*
Linus Torvaldsf78bb8a2006-03-08 10:33:05 -08001923 * A VFS-reclaimable slab tends to have most allocations
1924 * as GFP_NOFS and we really don't want to have to be allocating
1925 * higher-order pages when we are unable to shrink dcache.
1926 */
1927 if (flags & SLAB_RECLAIM_ACCOUNT)
1928 break;
1929
1930 /*
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001931 * Large number of objects is good, but very large slabs are
1932 * currently bad for the gfp()s.
1933 */
David Rientjes543585c2011-10-18 22:09:24 -07001934 if (gfporder >= slab_max_order)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001935 break;
1936
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001937 /*
1938 * Acceptable internal fragmentation?
1939 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001940 if (left_over * 8 <= (PAGE_SIZE << gfporder))
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001941 break;
1942 }
1943 return left_over;
1944}
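/*
 * Worked example (illustrative note, not part of the original file; 4KB
 * pages are an assumption): the "left_over * 8 <= (PAGE_SIZE << gfporder)"
 * test above accepts an order once internal waste is at most 1/8th of the
 * slab -- up to 512 bytes at gfporder 0 with 4096-byte pages, up to 1024
 * bytes at gfporder 1. The order only keeps growing while the waste stays
 * above that bound and gfporder is still below slab_max_order.
 */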
1945
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001946static struct array_cache __percpu *alloc_kmem_cache_cpus(
1947 struct kmem_cache *cachep, int entries, int batchcount)
1948{
1949 int cpu;
1950 size_t size;
1951 struct array_cache __percpu *cpu_cache;
1952
1953 size = sizeof(void *) * entries + sizeof(struct array_cache);
Joonsoo Kim85c9f4b2014-10-13 15:51:01 -07001954 cpu_cache = __alloc_percpu(size, sizeof(void *));
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001955
1956 if (!cpu_cache)
1957 return NULL;
1958
1959 for_each_possible_cpu(cpu) {
1960 init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1961 entries, batchcount);
1962 }
1963
1964 return cpu_cache;
1965}
1966
Pekka Enberg83b519e2009-06-10 19:40:04 +03001967static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001968{
Christoph Lameter97d06602012-07-06 15:25:11 -05001969 if (slab_state >= FULL)
Pekka Enberg83b519e2009-06-10 19:40:04 +03001970 return enable_cpucache(cachep, gfp);
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07001971
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001972 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1973 if (!cachep->cpu_cache)
1974 return 1;
1975
Christoph Lameter97d06602012-07-06 15:25:11 -05001976 if (slab_state == DOWN) {
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001977 /* Creation of first cache (kmem_cache). */
1978 set_up_node(kmem_cache, CACHE_CACHE);
Christoph Lameter2f9baa92012-11-28 16:23:09 +00001979 } else if (slab_state == PARTIAL) {
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001980 /* For kmem_cache_node */
1981 set_up_node(cachep, SIZE_NODE);
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001982 } else {
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001983 int node;
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001984
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001985 for_each_online_node(node) {
1986 cachep->node[node] = kmalloc_node(
1987 sizeof(struct kmem_cache_node), gfp, node);
1988 BUG_ON(!cachep->node[node]);
1989 kmem_cache_node_init(cachep->node[node]);
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001990 }
1991 }
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001992
Christoph Lameter6a673682013-01-10 19:14:19 +00001993 cachep->node[numa_mem_id()]->next_reap =
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08001994 jiffies + REAPTIMEOUT_NODE +
1995 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001996
1997 cpu_cache_get(cachep)->avail = 0;
1998 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1999 cpu_cache_get(cachep)->batchcount = 1;
2000 cpu_cache_get(cachep)->touched = 0;
2001 cachep->batchcount = 1;
2002 cachep->limit = BOOT_CPUCACHE_ENTRIES;
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002003 return 0;
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002004}
2005
Joonsoo Kim12220de2014-10-09 15:26:24 -07002006unsigned long kmem_cache_flags(unsigned long object_size,
2007 unsigned long flags, const char *name,
2008 void (*ctor)(void *))
2009{
2010 return flags;
2011}
2012
2013struct kmem_cache *
2014__kmem_cache_alias(const char *name, size_t size, size_t align,
2015 unsigned long flags, void (*ctor)(void *))
2016{
2017 struct kmem_cache *cachep;
2018
2019 cachep = find_mergeable(size, align, flags, name, ctor);
2020 if (cachep) {
2021 cachep->refcount++;
2022
2023 /*
2024 * Adjust the object sizes so that we clear
2025 * the complete object on kzalloc.
2026 */
2027 cachep->object_size = max_t(int, cachep->object_size, size);
2028 }
2029 return cachep;
2030}
2031
Joonsoo Kim158e3192016-03-15 14:54:35 -07002032static bool set_off_slab_cache(struct kmem_cache *cachep,
2033 size_t size, unsigned long flags)
2034{
2035 size_t left;
2036
2037 cachep->num = 0;
2038
2039 /*
Joonsoo Kim3217fd92016-03-15 14:54:41 -07002040	 * Always use on-slab management when SLAB_NOLEAKTRACE is set,
2041 * to avoid recursive calls into kmemleak.
Joonsoo Kim158e3192016-03-15 14:54:35 -07002042 */
Joonsoo Kim158e3192016-03-15 14:54:35 -07002043 if (flags & SLAB_NOLEAKTRACE)
2044 return false;
2045
2046 /*
	2047	 * Size is large; assume it is best to place the slab management obj
2048 * off-slab (should allow better packing of objs).
2049 */
2050 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
2051 if (!cachep->num)
2052 return false;
2053
2054 /*
2055 * If the slab has been placed off-slab, and we have enough space then
2056 * move it on-slab. This is at the expense of any extra colouring.
2057 */
2058 if (left >= cachep->num * sizeof(freelist_idx_t))
2059 return false;
2060
2061 cachep->colour = left / cachep->colour_off;
2062
2063 return true;
2064}
2065
2066static bool set_on_slab_cache(struct kmem_cache *cachep,
2067 size_t size, unsigned long flags)
2068{
2069 size_t left;
2070
2071 cachep->num = 0;
2072
2073 left = calculate_slab_order(cachep, size, flags);
2074 if (!cachep->num)
2075 return false;
2076
2077 cachep->colour = left / cachep->colour_off;
2078
2079 return true;
2080}
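/*
 * Worked example (illustrative note, not part of the original file; the
 * 64-byte colour_off is an assumption matching a typical cache_line_size()):
 * if calculate_slab_order() leaves 200 unused bytes per slab, then
 * colour = 200 / 64 = 3, so successive slabs of this cache offset their
 * first object by 0, 64 and 128 bytes before wrapping, spreading identical
 * object indices across different hardware cache lines.
 */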
2081
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002082/**
Christoph Lameter039363f2012-07-06 15:25:10 -05002083 * __kmem_cache_create - Create a cache.
Randy Dunlapa755b762012-11-06 17:10:10 -08002084 * @cachep: cache management descriptor
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 * @flags: SLAB flags
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 *
2087 * Returns a ptr to the cache on success, NULL on failure.
	2088 * Cannot be called within an interrupt, but can be interrupted.
Paul Mundt20c2df82007-07-20 10:11:58 +09002089 * The @ctor is run when new pages are allocated by the cache.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 * The flags are
2092 *
2093 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2094 * to catch references to uninitialised memory.
2095 *
2096 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2097 * for buffer overruns.
2098 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2100 * cacheline. This can be beneficial if you're counting cycles as closely
2101 * as davem.
2102 */
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002103int
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002104__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105{
David Rientjesd4a5fca2014-09-25 16:05:20 -07002106 size_t ralign = BYTES_PER_WORD;
Pekka Enberg83b519e2009-06-10 19:40:04 +03002107 gfp_t gfp;
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002108 int err;
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002109 size_t size = cachep->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111#if DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112#if FORCED_DEBUG
2113 /*
2114 * Enable redzoning and last user accounting, except for caches with
2115 * large objects, if the increased size would increase the object size
2116 * above the next power of two: caches with object sizes just above a
2117 * power of two have a significant amount of internal fragmentation.
2118 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002119 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2120 2 * sizeof(unsigned long long)))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002121 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 if (!(flags & SLAB_DESTROY_BY_RCU))
2123 flags |= SLAB_POISON;
2124#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
Andrew Mortona737b3e2006-03-22 00:08:11 -08002127 /*
2128 * Check that size is in terms of words. This is needed to avoid
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 * unaligned accesses for some archs when redzoning is used, and makes
2130 * sure any on-slab bufctl's are also correctly aligned.
2131 */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002132 if (size & (BYTES_PER_WORD - 1)) {
2133 size += (BYTES_PER_WORD - 1);
2134 size &= ~(BYTES_PER_WORD - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 }
2136
David Woodhouse87a927c2007-07-04 21:26:44 -04002137 if (flags & SLAB_RED_ZONE) {
2138 ralign = REDZONE_ALIGN;
2139 /* If redzoning, ensure that the second redzone is suitably
2140 * aligned, by adjusting the object size accordingly. */
2141 size += REDZONE_ALIGN - 1;
2142 size &= ~(REDZONE_ALIGN - 1);
2143 }
Pekka Enbergca5f9702006-09-25 23:31:25 -07002144
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002145 /* 3) caller mandated alignment */
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002146 if (ralign < cachep->align) {
2147 ralign = cachep->align;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 }
Pekka Enberg3ff84a72011-02-14 17:46:21 +02002149 /* disable debug if necessary */
2150 if (ralign > __alignof__(unsigned long long))
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002151 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002152 /*
Pekka Enbergca5f9702006-09-25 23:31:25 -07002153 * 4) Store it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 */
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002155 cachep->align = ralign;
Joonsoo Kim158e3192016-03-15 14:54:35 -07002156 cachep->colour_off = cache_line_size();
2157 /* Offset must be a multiple of the alignment. */
2158 if (cachep->colour_off < cachep->align)
2159 cachep->colour_off = cachep->align;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
Pekka Enberg83b519e2009-06-10 19:40:04 +03002161 if (slab_is_available())
2162 gfp = GFP_KERNEL;
2163 else
2164 gfp = GFP_NOWAIT;
2165
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166#if DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
Pekka Enbergca5f9702006-09-25 23:31:25 -07002168 /*
2169 * Both debugging options require word-alignment which is calculated
2170 * into align above.
2171 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 if (flags & SLAB_RED_ZONE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 /* add space for red zone words */
Pekka Enberg3ff84a72011-02-14 17:46:21 +02002174 cachep->obj_offset += sizeof(unsigned long long);
2175 size += 2 * sizeof(unsigned long long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 }
2177 if (flags & SLAB_STORE_USER) {
Pekka Enbergca5f9702006-09-25 23:31:25 -07002178		/* user store requires one word of storage behind the end of
David Woodhouse87a927c2007-07-04 21:26:44 -04002179 * the real object. But if the second red zone needs to be
2180 * aligned to 64 bits, we must allow that much space.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002182 if (flags & SLAB_RED_ZONE)
2183 size += REDZONE_ALIGN;
2184 else
2185 size += BYTES_PER_WORD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 }
Joonsoo Kim832a15d2016-03-15 14:54:33 -07002187#endif
2188
2189 size = ALIGN(size, cachep->align);
2190 /*
2191 * We should restrict the number of objects in a slab to implement
	2192	 * a byte-sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2193 */
2194 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2195 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2196
2197#if DEBUG
Joonsoo Kim03a2d2a2015-10-01 15:36:54 -07002198 /*
	2199	 * To activate debug pagealloc, off-slab management is a necessary
	2200	 * requirement. In the early phase of initialization, the small sized slab
	2201	 * caches aren't initialized yet, so it would not be possible then. So we
	2202	 * need to check size >= 256; that guarantees that all the necessary small
	2203	 * sized slab caches are initialized in the current slab initialization sequence.
2204 */
Joonsoo Kim40323272016-03-15 14:54:18 -07002205 if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
Joonsoo Kimf3a3c322016-03-15 14:54:38 -07002206 size >= 256 && cachep->object_size > cache_line_size()) {
2207 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2208 size_t tmp_size = ALIGN(size, PAGE_SIZE);
2209
2210 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2211 flags |= CFLGS_OFF_SLAB;
2212 cachep->obj_offset += tmp_size - size;
2213 size = tmp_size;
2214 goto done;
2215 }
2216 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 }
2218#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Joonsoo Kim158e3192016-03-15 14:54:35 -07002220 if (set_off_slab_cache(cachep, size, flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 flags |= CFLGS_OFF_SLAB;
Joonsoo Kim158e3192016-03-15 14:54:35 -07002222 goto done;
Joonsoo Kim832a15d2016-03-15 14:54:33 -07002223 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Joonsoo Kim158e3192016-03-15 14:54:35 -07002225 if (set_on_slab_cache(cachep, size, flags))
2226 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
Joonsoo Kim158e3192016-03-15 14:54:35 -07002228 return -E2BIG;
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002229
Joonsoo Kim158e3192016-03-15 14:54:35 -07002230done:
2231 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 cachep->flags = flags;
Joonsoo Kima57a4982013-10-24 10:07:44 +09002233 cachep->allocflags = __GFP_COMP;
Christoph Lameter4b51d662007-02-10 01:43:10 -08002234 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
Glauber Costaa618e892012-06-14 16:17:21 +04002235 cachep->allocflags |= GFP_DMA;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002236 cachep->size = size;
Eric Dumazet6a2d7a92006-12-13 00:34:27 -08002237 cachep->reciprocal_buffer_size = reciprocal_value(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
Joonsoo Kim40b44132016-03-15 14:54:21 -07002239#if DEBUG
2240 /*
2241 * If we're going to use the generic kernel_map_pages()
2242 * poisoning, then it's going to smash the contents of
2243 * the redzone and userword anyhow, so switch them off.
2244 */
2245 if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2246 (cachep->flags & SLAB_POISON) &&
2247 is_debug_pagealloc_cache(cachep))
2248 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2249#endif
2250
2251 if (OFF_SLAB(cachep)) {
Joonsoo Kim158e3192016-03-15 14:54:35 -07002252 cachep->freelist_cache =
2253 kmalloc_slab(cachep->freelist_size, 0u);
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002254 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002256 err = setup_cpu_cache(cachep, gfp);
2257 if (err) {
Dmitry Safonov52b4b952016-02-17 13:11:37 -08002258 __kmem_cache_release(cachep);
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002259 return err;
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002260 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002262 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263}
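/*
 * Illustrative usage sketch (not part of slab.c): how a client typically
 * creates and uses a cache with the flags documented above. The struct and
 * identifiers below ("foo", foo_cachep, foo_cache_init, foo_cache_use) are
 * hypothetical and only stand in for a real user of the API.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
	unsigned long cookie;
	char payload[48];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* Debug builds may add red zones/poisoning behind the scenes. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_cache_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (!f)
		return;
	f->cookie = 1;
	kmem_cache_free(foo_cachep, f);
}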
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264
2265#if DEBUG
2266static void check_irq_off(void)
2267{
2268 BUG_ON(!irqs_disabled());
2269}
2270
2271static void check_irq_on(void)
2272{
2273 BUG_ON(irqs_disabled());
2274}
2275
Pekka Enberg343e0d72006-02-01 03:05:50 -08002276static void check_spinlock_acquired(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277{
2278#ifdef CONFIG_SMP
2279 check_irq_off();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002280 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281#endif
2282}
Christoph Lametere498be72005-09-09 13:03:32 -07002283
Pekka Enberg343e0d72006-02-01 03:05:50 -08002284static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
Christoph Lametere498be72005-09-09 13:03:32 -07002285{
2286#ifdef CONFIG_SMP
2287 check_irq_off();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002288 assert_spin_locked(&get_node(cachep, node)->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07002289#endif
2290}
2291
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292#else
2293#define check_irq_off() do { } while(0)
2294#define check_irq_on() do { } while(0)
2295#define check_spinlock_acquired(x) do { } while(0)
Christoph Lametere498be72005-09-09 13:03:32 -07002296#define check_spinlock_acquired_node(x, y) do { } while(0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297#endif
2298
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002299static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
Christoph Lameteraab22072006-03-22 00:09:06 -08002300 struct array_cache *ac,
2301 int force, int node);
2302
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303static void do_drain(void *arg)
2304{
Andrew Mortona737b3e2006-03-22 00:08:11 -08002305 struct kmem_cache *cachep = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 struct array_cache *ac;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07002307 int node = numa_mem_id();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002308 struct kmem_cache_node *n;
Joonsoo Kim97654df2014-08-06 16:04:25 -07002309 LIST_HEAD(list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310
2311 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002312 ac = cpu_cache_get(cachep);
Christoph Lameter18bf8542014-08-06 16:04:11 -07002313 n = get_node(cachep, node);
2314 spin_lock(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07002315 free_block(cachep, ac->entry, ac->avail, node, &list);
Christoph Lameter18bf8542014-08-06 16:04:11 -07002316 spin_unlock(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07002317 slabs_destroy(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 ac->avail = 0;
2319}
2320
Pekka Enberg343e0d72006-02-01 03:05:50 -08002321static void drain_cpu_caches(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322{
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002323 struct kmem_cache_node *n;
Christoph Lametere498be72005-09-09 13:03:32 -07002324 int node;
2325
Jens Axboe15c8b6c2008-05-09 09:39:44 +02002326 on_each_cpu(do_drain, cachep, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 check_irq_on();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002328 for_each_kmem_cache_node(cachep, node, n)
2329 if (n->alien)
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002330 drain_alien_cache(cachep, n->alien);
Roland Dreiera4523a82006-05-15 11:41:00 -07002331
Christoph Lameter18bf8542014-08-06 16:04:11 -07002332 for_each_kmem_cache_node(cachep, node, n)
2333 drain_array(cachep, n, n->shared, 1, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334}
2335
Christoph Lametered11d9e2006-06-30 01:55:45 -07002336/*
2337 * Remove slabs from the list of free slabs.
2338 * Specify the number of slabs to drain in tofree.
2339 *
2340 * Returns the actual number of slabs released.
2341 */
2342static int drain_freelist(struct kmem_cache *cache,
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002343 struct kmem_cache_node *n, int tofree)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344{
Christoph Lametered11d9e2006-06-30 01:55:45 -07002345 struct list_head *p;
2346 int nr_freed;
Joonsoo Kim8456a642013-10-24 10:07:49 +09002347 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348
Christoph Lametered11d9e2006-06-30 01:55:45 -07002349 nr_freed = 0;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002350 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002352 spin_lock_irq(&n->list_lock);
2353 p = n->slabs_free.prev;
2354 if (p == &n->slabs_free) {
2355 spin_unlock_irq(&n->list_lock);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002356 goto out;
2357 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358
Joonsoo Kim8456a642013-10-24 10:07:49 +09002359 page = list_entry(p, struct page, lru);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002360 list_del(&page->lru);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002361 /*
2362 * Safe to drop the lock. The slab is no longer linked
2363 * to the cache.
2364 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002365 n->free_objects -= cache->num;
2366 spin_unlock_irq(&n->list_lock);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002367 slab_destroy(cache, page);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002368 nr_freed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 }
Christoph Lametered11d9e2006-06-30 01:55:45 -07002370out:
2371 return nr_freed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372}
2373
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -08002374int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
Christoph Lametere498be72005-09-09 13:03:32 -07002375{
Christoph Lameter18bf8542014-08-06 16:04:11 -07002376 int ret = 0;
2377 int node;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002378 struct kmem_cache_node *n;
Christoph Lametere498be72005-09-09 13:03:32 -07002379
2380 drain_cpu_caches(cachep);
2381
2382 check_irq_on();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002383 for_each_kmem_cache_node(cachep, node, n) {
Wanpeng Li0fa81032013-07-04 08:33:22 +08002384 drain_freelist(cachep, n, slabs_tofree(cachep, n));
Christoph Lametered11d9e2006-06-30 01:55:45 -07002385
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002386 ret += !list_empty(&n->slabs_full) ||
2387 !list_empty(&n->slabs_partial);
Christoph Lametere498be72005-09-09 13:03:32 -07002388 }
2389 return (ret ? 1 : 0);
2390}
2391
Christoph Lameter945cf2b2012-09-04 23:18:33 +00002392int __kmem_cache_shutdown(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393{
Dmitry Safonov52b4b952016-02-17 13:11:37 -08002394 return __kmem_cache_shrink(cachep, false);
2395}
2396
2397void __kmem_cache_release(struct kmem_cache *cachep)
2398{
Christoph Lameter12c36672012-09-04 23:38:33 +00002399 int i;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002400 struct kmem_cache_node *n;
Christoph Lameter12c36672012-09-04 23:38:33 +00002401
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07002402 free_percpu(cachep->cpu_cache);
Christoph Lameter12c36672012-09-04 23:38:33 +00002403
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002404 /* NUMA: free the node structures */
Christoph Lameter18bf8542014-08-06 16:04:11 -07002405 for_each_kmem_cache_node(cachep, i, n) {
2406 kfree(n->shared);
2407 free_alien_cache(n->alien);
2408 kfree(n);
2409 cachep->node[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002413/*
2414 * Get the memory for a slab management obj.
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08002415 *
2416 * For a slab cache whose slab descriptor is off-slab, the
2417 * slab descriptor can't come from the same cache that is being created,
2418 * because if it could, that would mean deferring the creation of
2419 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point,
2420 * and we would eventually call down to __kmem_cache_create(), which
2421 * in turn looks up the desired-size one in the kmalloc_{dma,}_caches.
2422 * This is a "chicken-and-egg" problem.
2423 *
2424 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2425 * which are all initialized during kmem_cache_init().
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002426 */
Joonsoo Kim7e007352013-10-30 19:04:01 +09002427static void *alloc_slabmgmt(struct kmem_cache *cachep,
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002428 struct page *page, int colour_off,
2429 gfp_t local_flags, int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430{
Joonsoo Kim7e007352013-10-30 19:04:01 +09002431 void *freelist;
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002432 void *addr = page_address(page);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002433
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07002434 page->s_mem = addr + colour_off;
2435 page->active = 0;
2436
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 if (OFF_SLAB(cachep)) {
2438 /* Slab management obj is off-slab. */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002439 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
Pekka Enberg8759ec52008-11-26 10:01:31 +02002440 local_flags, nodeid);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002441 if (!freelist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 return NULL;
2443 } else {
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07002444	/* We will use the last bytes of the slab for the freelist */
2445 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2446 cachep->freelist_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 }
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07002448
Joonsoo Kim8456a642013-10-24 10:07:49 +09002449 return freelist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450}
2451
Joonsoo Kim7cc689732014-04-18 16:24:09 +09002452static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453{
Joonsoo Kima41adfa2013-12-02 17:49:42 +09002454 return ((freelist_idx_t *)page->freelist)[idx];
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002455}
2456
2457static inline void set_free_obj(struct page *page,
Joonsoo Kim7cc689732014-04-18 16:24:09 +09002458 unsigned int idx, freelist_idx_t val)
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002459{
Joonsoo Kima41adfa2013-12-02 17:49:42 +09002460 ((freelist_idx_t *)(page->freelist))[idx] = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461}
2462
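/*
 * Illustrative sketch (not part of the kernel source): page->freelist is an
 * array of object indices and page->active marks the boundary between
 * allocated and free slots.  For a hypothetical slab holding four objects,
 * freshly initialized by cache_init_objs():
 *
 *	freelist = { 0, 1, 2, 3 },  active = 0		all objects free
 *
 * slab_get_obj() hands out the object at index get_free_obj(page, active)
 * and increments active; slab_put_obj() decrements active and records the
 * freed object's index with set_free_obj(), so reuse within a slab is LIFO.
 */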
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002463static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464{
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002465#if DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466 int i;
2467
2468 for (i = 0; i < cachep->num; i++) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09002469 void *objp = index_to_obj(cachep, page, i);
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002470
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471 if (cachep->flags & SLAB_STORE_USER)
2472 *dbg_userword(cachep, objp) = NULL;
2473
2474 if (cachep->flags & SLAB_RED_ZONE) {
2475 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2476 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2477 }
2478 /*
Andrew Mortona737b3e2006-03-22 00:08:11 -08002479 * Constructors are not allowed to allocate memory from the same
2480 * cache which they are a constructor for. Otherwise, deadlock.
2481 * They must also be threaded.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 */
2483 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002484 cachep->ctor(objp + obj_offset(cachep));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485
2486 if (cachep->flags & SLAB_RED_ZONE) {
2487 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2488 slab_error(cachep, "constructor overwrote the"
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002489 " end of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2491 slab_error(cachep, "constructor overwrote the"
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002492 " start of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 }
Joonsoo Kim40b44132016-03-15 14:54:21 -07002494 /* need to poison the objs? */
2495 if (cachep->flags & SLAB_POISON) {
2496 poison_obj(cachep, objp, POISON_FREE);
2497 slab_kernel_map(cachep, objp, 0, 0);
2498 }
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500#endif
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002501}
2502
2503static void cache_init_objs(struct kmem_cache *cachep,
2504 struct page *page)
2505{
2506 int i;
2507
2508 cache_init_objs_debug(cachep, page);
2509
2510 for (i = 0; i < cachep->num; i++) {
2511 /* constructor could break poison info */
2512 if (DEBUG == 0 && cachep->ctor)
2513 cachep->ctor(index_to_obj(cachep, page, i));
2514
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002515 set_free_obj(page, i, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517}
2518
Pekka Enberg343e0d72006-02-01 03:05:50 -08002519static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520{
Christoph Lameter4b51d662007-02-10 01:43:10 -08002521 if (CONFIG_ZONE_DMA_FLAG) {
2522 if (flags & GFP_DMA)
Glauber Costaa618e892012-06-14 16:17:21 +04002523 BUG_ON(!(cachep->allocflags & GFP_DMA));
Christoph Lameter4b51d662007-02-10 01:43:10 -08002524 else
Glauber Costaa618e892012-06-14 16:17:21 +04002525 BUG_ON(cachep->allocflags & GFP_DMA);
Christoph Lameter4b51d662007-02-10 01:43:10 -08002526 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527}
2528
Joonsoo Kim260b61d2016-03-15 14:54:12 -07002529static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002530{
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09002531 void *objp;
Matthew Dobson78d382d2006-02-01 03:05:47 -08002532
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002533 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
Joonsoo Kim8456a642013-10-24 10:07:49 +09002534 page->active++;
Matthew Dobson78d382d2006-02-01 03:05:47 -08002535
Joonsoo Kimd31676d2016-03-15 14:54:24 -07002536#if DEBUG
2537 if (cachep->flags & SLAB_STORE_USER)
2538 set_store_user_dirty(cachep);
2539#endif
2540
Matthew Dobson78d382d2006-02-01 03:05:47 -08002541 return objp;
2542}
2543
Joonsoo Kim260b61d2016-03-15 14:54:12 -07002544static void slab_put_obj(struct kmem_cache *cachep,
2545 struct page *page, void *objp)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002546{
Joonsoo Kim8456a642013-10-24 10:07:49 +09002547 unsigned int objnr = obj_to_index(cachep, page, objp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002548#if DEBUG
Joonsoo Kim16025172013-10-24 10:07:46 +09002549 unsigned int i;
Matthew Dobson78d382d2006-02-01 03:05:47 -08002550
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09002551 /* Verify double free bug */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002552 for (i = page->active; i < cachep->num; i++) {
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002553 if (get_free_obj(page, i) == objnr) {
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09002554 printk(KERN_ERR "slab: double free detected in cache "
2555 "'%s', objp %p\n", cachep->name, objp);
2556 BUG();
2557 }
Matthew Dobson78d382d2006-02-01 03:05:47 -08002558 }
2559#endif
Joonsoo Kim8456a642013-10-24 10:07:49 +09002560 page->active--;
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002561 set_free_obj(page, page->active, objnr);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002562}
2563
Pekka Enberg47768742006-06-23 02:03:07 -07002564/*
2565 * Map pages beginning at addr to the given cache and slab. This is required
2566 * for the slab allocator to be able to look up the cache and slab of a
Nick Pigginccd35fb2011-01-07 17:49:17 +11002567 * virtual address for kfree, ksize, and slab debugging.
Pekka Enberg47768742006-06-23 02:03:07 -07002568 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002569static void slab_map_pages(struct kmem_cache *cache, struct page *page,
Joonsoo Kim7e007352013-10-30 19:04:01 +09002570 void *freelist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571{
Joonsoo Kima57a4982013-10-24 10:07:44 +09002572 page->slab_cache = cache;
Joonsoo Kim8456a642013-10-24 10:07:49 +09002573 page->freelist = freelist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574}
2575
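/*
 * Illustrative sketch (not part of the kernel source): with the mapping set
 * up by slab_map_pages(), the free path can recover everything it needs
 * from the object pointer alone, roughly:
 *
 *	struct page *page = virt_to_head_page(objp);
 *	struct kmem_cache *cache = page->slab_cache;
 *	void *freelist = page->freelist;
 *
 * This is what kfree(), ksize() and cache_free_debugcheck() rely on.
 */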
2576/*
2577 * Grow (by 1) the number of slabs within a cache. This is called by
2578 * kmem_cache_alloc() when there are no active objs left in a cache.
2579 */
Christoph Lameter3c517a62006-12-06 20:33:29 -08002580static int cache_grow(struct kmem_cache *cachep,
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002581 gfp_t flags, int nodeid, struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582{
Joonsoo Kim7e007352013-10-30 19:04:01 +09002583 void *freelist;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002584 size_t offset;
2585 gfp_t local_flags;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002586 struct kmem_cache_node *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587
Andrew Mortona737b3e2006-03-22 00:08:11 -08002588 /*
2589 * Be lazy and only check for valid flags here, keeping it out of the
2590 * critical path in kmem_cache_alloc().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 */
Andrew Mortonc871ac42014-12-10 15:42:25 -08002592 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2593 pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
2594 BUG();
2595 }
Christoph Lameter6cb06222007-10-16 01:25:41 -07002596 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002598 /* Take the node list lock to change the colour_next on this node */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 check_irq_off();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002600 n = get_node(cachep, nodeid);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002601 spin_lock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602
2603	/* Get colour for the slab, and calculate the next value. */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002604 offset = n->colour_next;
2605 n->colour_next++;
2606 if (n->colour_next >= cachep->colour)
2607 n->colour_next = 0;
2608 spin_unlock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002610 offset *= cachep->colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611
Mel Gormand0164ad2015-11-06 16:28:21 -08002612 if (gfpflags_allow_blocking(local_flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 local_irq_enable();
2614
2615 /*
2616 * The test for missing atomic flag is performed here, rather than
2617 * the more obvious place, simply to reduce the critical path length
2618 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2619 * will eventually be caught here (where it matters).
2620 */
2621 kmem_flagcheck(cachep, flags);
2622
Andrew Mortona737b3e2006-03-22 00:08:11 -08002623 /*
2624 * Get mem for the objs. Attempt to allocate a physical page from
2625 * 'nodeid'.
Christoph Lametere498be72005-09-09 13:03:32 -07002626 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002627 if (!page)
2628 page = kmem_getpages(cachep, local_flags, nodeid);
2629 if (!page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 goto failed;
2631
2632 /* Get slab management. */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002633 freelist = alloc_slabmgmt(cachep, page, offset,
Christoph Lameter6cb06222007-10-16 01:25:41 -07002634 local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002635 if (!freelist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 goto opps1;
2637
Joonsoo Kim8456a642013-10-24 10:07:49 +09002638 slab_map_pages(cachep, page, freelist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639
Joonsoo Kim8456a642013-10-24 10:07:49 +09002640 cache_init_objs(cachep, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641
Mel Gormand0164ad2015-11-06 16:28:21 -08002642 if (gfpflags_allow_blocking(local_flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 local_irq_disable();
2644 check_irq_off();
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002645 spin_lock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646
2647 /* Make slab active. */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002648 list_add_tail(&page->lru, &(n->slabs_free));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 STATS_INC_GROWN(cachep);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002650 n->free_objects += cachep->num;
2651 spin_unlock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 return 1;
Andrew Mortona737b3e2006-03-22 00:08:11 -08002653opps1:
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002654 kmem_freepages(cachep, page);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002655failed:
Mel Gormand0164ad2015-11-06 16:28:21 -08002656 if (gfpflags_allow_blocking(local_flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 local_irq_disable();
2658 return 0;
2659}
2660
2661#if DEBUG
2662
2663/*
2664 * Perform extra freeing checks:
2665 * - detect bad pointers.
2666 * - POISON/RED_ZONE checking
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 */
2668static void kfree_debugcheck(const void *objp)
2669{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 if (!virt_addr_valid(objp)) {
2671 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002672 (unsigned long)objp);
2673 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675}
2676
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002677static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2678{
David Woodhouseb46b8f12007-05-08 00:22:59 -07002679 unsigned long long redzone1, redzone2;
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002680
2681 redzone1 = *dbg_redzone1(cache, obj);
2682 redzone2 = *dbg_redzone2(cache, obj);
2683
2684 /*
2685 * Redzone is ok.
2686 */
2687 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2688 return;
2689
2690 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2691 slab_error(cache, "double free detected");
2692 else
2693 slab_error(cache, "memory outside object was overwritten");
2694
David Woodhouseb46b8f12007-05-08 00:22:59 -07002695 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002696 obj, redzone1, redzone2);
2697}
2698
Pekka Enberg343e0d72006-02-01 03:05:50 -08002699static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03002700 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 unsigned int objnr;
Joonsoo Kim8456a642013-10-24 10:07:49 +09002703 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704
Matthew Wilcox80cbd912007-11-29 12:05:13 -07002705 BUG_ON(virt_to_cache(objp) != cachep);
2706
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002707 objp -= obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 kfree_debugcheck(objp);
Christoph Lameterb49af682007-05-06 14:49:41 -07002709 page = virt_to_head_page(objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 if (cachep->flags & SLAB_RED_ZONE) {
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002712 verify_redzone_free(cachep, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2714 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2715 }
Joonsoo Kimd31676d2016-03-15 14:54:24 -07002716 if (cachep->flags & SLAB_STORE_USER) {
2717 set_store_user_dirty(cachep);
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03002718 *dbg_userword(cachep, objp) = (void *)caller;
Joonsoo Kimd31676d2016-03-15 14:54:24 -07002719 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720
Joonsoo Kim8456a642013-10-24 10:07:49 +09002721 objnr = obj_to_index(cachep, page, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722
2723 BUG_ON(objnr >= cachep->num);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002724 BUG_ON(objp != index_to_obj(cachep, page, objnr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 poison_obj(cachep, objp, POISON_FREE);
Joonsoo Kim40b44132016-03-15 14:54:21 -07002728 slab_kernel_map(cachep, objp, 0, caller);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 }
2730 return objp;
2731}
2732
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733#else
2734#define kfree_debugcheck(x) do { } while(0)
2735#define cache_free_debugcheck(x,objp,z) (objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736#endif
2737
Joonsoo Kimd8410232016-03-15 14:54:44 -07002738static inline void fixup_slab_list(struct kmem_cache *cachep,
2739 struct kmem_cache_node *n, struct page *page)
2740{
2741 /* move slabp to correct slabp list: */
2742 list_del(&page->lru);
2743 if (page->active == cachep->num)
2744 list_add(&page->lru, &n->slabs_full);
2745 else
2746 list_add(&page->lru, &n->slabs_partial);
2747}
2748
Geliang Tang7aa0d222016-01-14 15:18:02 -08002749static struct page *get_first_slab(struct kmem_cache_node *n)
2750{
2751 struct page *page;
2752
2753 page = list_first_entry_or_null(&n->slabs_partial,
2754 struct page, lru);
2755 if (!page) {
2756 n->free_touched = 1;
2757 page = list_first_entry_or_null(&n->slabs_free,
2758 struct page, lru);
2759 }
2760
2761 return page;
2762}
2763
Mel Gorman072bb0a2012-07-31 16:43:58 -07002764static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
2765 bool force_refill)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766{
2767 int batchcount;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002768 struct kmem_cache_node *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 struct array_cache *ac;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002770 int node;
2771
Joe Korty6d2144d2008-03-05 15:04:59 -08002772 check_irq_off();
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07002773 node = numa_mem_id();
Mel Gorman072bb0a2012-07-31 16:43:58 -07002774 if (unlikely(force_refill))
2775 goto force_grow;
2776retry:
Joe Korty6d2144d2008-03-05 15:04:59 -08002777 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 batchcount = ac->batchcount;
2779 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002780 /*
2781 * If there was little recent activity on this cache, then
2782 * perform only a partial refill. Otherwise we could generate
2783 * refill bouncing.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 */
2785 batchcount = BATCHREFILL_LIMIT;
2786 }
Christoph Lameter18bf8542014-08-06 16:04:11 -07002787 n = get_node(cachep, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002789 BUG_ON(ac->avail > 0 || !n);
2790 spin_lock(&n->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07002791
Christoph Lameter3ded1752006-03-25 03:06:44 -08002792 /* See if we can refill from the shared array */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002793 if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2794 n->shared->touched = 1;
Christoph Lameter3ded1752006-03-25 03:06:44 -08002795 goto alloc_done;
Nick Piggin44b57f12010-01-27 22:27:40 +11002796 }
Christoph Lameter3ded1752006-03-25 03:06:44 -08002797
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 while (batchcount > 0) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09002799 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800		/* Get the slab that the allocation is to come from. */
Geliang Tang7aa0d222016-01-14 15:18:02 -08002801 page = get_first_slab(n);
2802 if (!page)
2803 goto must_grow;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 check_spinlock_acquired(cachep);
Pekka Enberg714b81712007-05-06 14:49:03 -07002806
2807 /*
2808 * The slab was either on partial or free list so
2809 * there must be at least one object available for
2810 * allocation.
2811 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002812 BUG_ON(page->active >= cachep->num);
Pekka Enberg714b81712007-05-06 14:49:03 -07002813
Joonsoo Kim8456a642013-10-24 10:07:49 +09002814 while (page->active < cachep->num && batchcount--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 STATS_INC_ALLOCED(cachep);
2816 STATS_INC_ACTIVE(cachep);
2817 STATS_SET_HIGH(cachep);
2818
Joonsoo Kim260b61d2016-03-15 14:54:12 -07002819 ac_put_obj(cachep, ac, slab_get_obj(cachep, page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821
Joonsoo Kimd8410232016-03-15 14:54:44 -07002822 fixup_slab_list(cachep, n, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 }
2824
Andrew Mortona737b3e2006-03-22 00:08:11 -08002825must_grow:
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002826 n->free_objects -= ac->avail;
Andrew Mortona737b3e2006-03-22 00:08:11 -08002827alloc_done:
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002828 spin_unlock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829
2830 if (unlikely(!ac->avail)) {
2831 int x;
Mel Gorman072bb0a2012-07-31 16:43:58 -07002832force_grow:
David Rientjes4167e9b2015-04-14 15:46:55 -07002833 x = cache_grow(cachep, gfp_exact_node(flags), node, NULL);
Christoph Lametere498be72005-09-09 13:03:32 -07002834
Andrew Mortona737b3e2006-03-22 00:08:11 -08002835 /* cache_grow can reenable interrupts, then ac could change. */
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002836 ac = cpu_cache_get(cachep);
David Rientjes51cd8e62012-08-28 19:57:21 -07002837 node = numa_mem_id();
Mel Gorman072bb0a2012-07-31 16:43:58 -07002838
2839 /* no objects in sight? abort */
2840 if (!x && (ac->avail == 0 || force_refill))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 return NULL;
2842
Andrew Mortona737b3e2006-03-22 00:08:11 -08002843 if (!ac->avail) /* objects refilled by interrupt? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 goto retry;
2845 }
2846 ac->touched = 1;
Mel Gorman072bb0a2012-07-31 16:43:58 -07002847
2848 return ac_get_obj(cachep, ac, flags, force_refill);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849}
2850
Andrew Mortona737b3e2006-03-22 00:08:11 -08002851static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2852 gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853{
Mel Gormand0164ad2015-11-06 16:28:21 -08002854 might_sleep_if(gfpflags_allow_blocking(flags));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855#if DEBUG
2856 kmem_flagcheck(cachep, flags);
2857#endif
2858}
2859
2860#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08002861static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03002862 gfp_t flags, void *objp, unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002864 if (!objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 return objp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002866 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 check_poison_obj(cachep, objp);
Joonsoo Kim40b44132016-03-15 14:54:21 -07002868 slab_kernel_map(cachep, objp, 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 poison_obj(cachep, objp, POISON_INUSE);
2870 }
2871 if (cachep->flags & SLAB_STORE_USER)
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03002872 *dbg_userword(cachep, objp) = (void *)caller;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873
2874 if (cachep->flags & SLAB_RED_ZONE) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002875 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2876 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2877 slab_error(cachep, "double free, or memory outside"
2878 " object was overwritten");
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002879 printk(KERN_ERR
David Woodhouseb46b8f12007-05-08 00:22:59 -07002880 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
Andrew Mortona737b3e2006-03-22 00:08:11 -08002881 objp, *dbg_redzone1(cachep, objp),
2882 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 }
2884 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2885 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2886 }
Joonsoo Kim03787302014-06-23 13:22:06 -07002887
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002888 objp += obj_offset(cachep);
Christoph Lameter4f104932007-05-06 14:50:17 -07002889 if (cachep->ctor && cachep->flags & SLAB_POISON)
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002890 cachep->ctor(objp);
Tetsuo Handa7ea466f2011-07-21 09:42:45 +09002891 if (ARCH_SLAB_MINALIGN &&
2892 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002893 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
Hugh Dickinsc2251502011-07-11 13:35:08 -07002894 objp, (int)ARCH_SLAB_MINALIGN);
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002895 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 return objp;
2897}
2898#else
2899#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2900#endif
2901
Pekka Enberg343e0d72006-02-01 03:05:50 -08002902static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002904 void *objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 struct array_cache *ac;
Mel Gorman072bb0a2012-07-31 16:43:58 -07002906 bool force_refill = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907
Alok N Kataria5c382302005-09-27 21:45:46 -07002908 check_irq_off();
Akinobu Mita8a8b6502006-12-08 02:39:44 -08002909
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002910 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 if (likely(ac->avail)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912 ac->touched = 1;
Mel Gorman072bb0a2012-07-31 16:43:58 -07002913 objp = ac_get_obj(cachep, ac, flags, false);
2914
J. R. Okajimaddbf2e82009-12-02 16:55:50 +09002915 /*
Mel Gorman072bb0a2012-07-31 16:43:58 -07002916 * Allow for the possibility all avail objects are not allowed
2917 * by the current flags
J. R. Okajimaddbf2e82009-12-02 16:55:50 +09002918 */
Mel Gorman072bb0a2012-07-31 16:43:58 -07002919 if (objp) {
2920 STATS_INC_ALLOCHIT(cachep);
2921 goto out;
2922 }
2923 force_refill = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 }
Mel Gorman072bb0a2012-07-31 16:43:58 -07002925
2926 STATS_INC_ALLOCMISS(cachep);
2927 objp = cache_alloc_refill(cachep, flags, force_refill);
2928 /*
2929 * the 'ac' may be updated by cache_alloc_refill(),
2930 * and kmemleak_erase() requires its correct value.
2931 */
2932 ac = cpu_cache_get(cachep);
2933
2934out:
Catalin Marinasd5cff632009-06-11 13:22:40 +01002935 /*
2936 * To avoid a false negative, if an object that is in one of the
2937 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
2938 * treat the array pointers as a reference to the object.
2939 */
J. R. Okajimaf3d8b532009-12-02 16:55:49 +09002940 if (objp)
2941 kmemleak_erase(&ac->entry[ac->avail]);
Alok N Kataria5c382302005-09-27 21:45:46 -07002942 return objp;
2943}
2944
Christoph Lametere498be72005-09-09 13:03:32 -07002945#ifdef CONFIG_NUMA
2946/*
Zefan Li2ad654b2014-09-25 09:41:02 +08002947 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
Paul Jacksonc61afb12006-03-24 03:16:08 -08002948 *
2949 * If we are in_interrupt, then process context, including cpusets and
2950 * mempolicy, may not apply and should not be used for allocation policy.
2951 */
2952static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2953{
2954 int nid_alloc, nid_here;
2955
Christoph Lameter765c4502006-09-27 01:50:08 -07002956 if (in_interrupt() || (flags & __GFP_THISNODE))
Paul Jacksonc61afb12006-03-24 03:16:08 -08002957 return NULL;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07002958 nid_alloc = nid_here = numa_mem_id();
Paul Jacksonc61afb12006-03-24 03:16:08 -08002959 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
Jack Steiner6adef3e2010-05-26 14:42:49 -07002960 nid_alloc = cpuset_slab_spread_node();
Paul Jacksonc61afb12006-03-24 03:16:08 -08002961 else if (current->mempolicy)
David Rientjes2a389612014-04-07 15:37:29 -07002962 nid_alloc = mempolicy_slab_node();
Paul Jacksonc61afb12006-03-24 03:16:08 -08002963 if (nid_alloc != nid_here)
Christoph Hellwig8b98c162006-12-06 20:32:30 -08002964 return ____cache_alloc_node(cachep, flags, nid_alloc);
Paul Jacksonc61afb12006-03-24 03:16:08 -08002965 return NULL;
2966}
2967
2968/*
Christoph Lameter765c4502006-09-27 01:50:08 -07002969 * Fallback function if there was no memory available and no objects on a
Christoph Lameter3c517a62006-12-06 20:33:29 -08002970 * certain node and fallback is permitted. First we scan all the
Christoph Lameter6a673682013-01-10 19:14:19 +00002971 * available nodes for available objects. If that fails then we
Christoph Lameter3c517a62006-12-06 20:33:29 -08002972 * perform an allocation without specifying a node. This allows the page
2973 * allocator to do its reclaim / fallback magic. We then insert the
2974 * slab into the proper nodelist and then allocate from it.
Christoph Lameter765c4502006-09-27 01:50:08 -07002975 */
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08002976static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
Christoph Lameter765c4502006-09-27 01:50:08 -07002977{
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08002978 struct zonelist *zonelist;
2979 gfp_t local_flags;
Mel Gormandd1a2392008-04-28 02:12:17 -07002980 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07002981 struct zone *zone;
2982 enum zone_type high_zoneidx = gfp_zone(flags);
Christoph Lameter765c4502006-09-27 01:50:08 -07002983 void *obj = NULL;
Christoph Lameter3c517a62006-12-06 20:33:29 -08002984 int nid;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002985 unsigned int cpuset_mems_cookie;
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08002986
2987 if (flags & __GFP_THISNODE)
2988 return NULL;
2989
Christoph Lameter6cb06222007-10-16 01:25:41 -07002990 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
Christoph Lameter765c4502006-09-27 01:50:08 -07002991
Mel Gormancc9a6c82012-03-21 16:34:11 -07002992retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07002993 cpuset_mems_cookie = read_mems_allowed_begin();
David Rientjes2a389612014-04-07 15:37:29 -07002994 zonelist = node_zonelist(mempolicy_slab_node(), flags);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002995
Christoph Lameter3c517a62006-12-06 20:33:29 -08002996retry:
2997 /*
2998 * Look through allowed nodes for objects available
2999 * from existing per node queues.
3000 */
Mel Gorman54a6eb52008-04-28 02:12:16 -07003001 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3002 nid = zone_to_nid(zone);
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003003
Vladimir Davydov061d7072014-12-12 16:58:25 -08003004 if (cpuset_zone_allowed(zone, flags) &&
Christoph Lameter18bf8542014-08-06 16:04:11 -07003005 get_node(cache, nid) &&
3006 get_node(cache, nid)->free_objects) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003007 obj = ____cache_alloc_node(cache,
David Rientjes4167e9b2015-04-14 15:46:55 -07003008 gfp_exact_node(flags), nid);
Christoph Lameter481c5342008-06-21 16:46:35 -07003009 if (obj)
3010 break;
3011 }
Christoph Lameter3c517a62006-12-06 20:33:29 -08003012 }
3013
Christoph Lametercfce6602007-05-06 14:50:17 -07003014 if (!obj) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003015 /*
3016 * This allocation will be performed within the constraints
3017 * of the current cpuset / memory policy requirements.
3018 * We may trigger various forms of reclaim on the allowed
3019 * set and go into memory reserves if necessary.
3020 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09003021 struct page *page;
3022
Mel Gormand0164ad2015-11-06 16:28:21 -08003023 if (gfpflags_allow_blocking(local_flags))
Christoph Lameterdd47ea72006-12-13 00:34:11 -08003024 local_irq_enable();
3025 kmem_flagcheck(cache, flags);
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09003026 page = kmem_getpages(cache, local_flags, numa_mem_id());
Mel Gormand0164ad2015-11-06 16:28:21 -08003027 if (gfpflags_allow_blocking(local_flags))
Christoph Lameterdd47ea72006-12-13 00:34:11 -08003028 local_irq_disable();
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09003029 if (page) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003030 /*
3031 * Insert into the appropriate per node queues
3032 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09003033 nid = page_to_nid(page);
3034 if (cache_grow(cache, flags, nid, page)) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003035 obj = ____cache_alloc_node(cache,
David Rientjes4167e9b2015-04-14 15:46:55 -07003036 gfp_exact_node(flags), nid);
Christoph Lameter3c517a62006-12-06 20:33:29 -08003037 if (!obj)
3038 /*
3039 * Another processor may allocate the
3040 * objects in the slab since we are
3041 * not holding any locks.
3042 */
3043 goto retry;
3044 } else {
Hugh Dickinsb6a60452007-01-05 16:36:36 -08003045 /* cache_grow already freed obj */
Christoph Lameter3c517a62006-12-06 20:33:29 -08003046 obj = NULL;
3047 }
3048 }
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003049 }
Mel Gormancc9a6c82012-03-21 16:34:11 -07003050
Mel Gormand26914d2014-04-03 14:47:24 -07003051 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07003052 goto retry_cpuset;
Christoph Lameter765c4502006-09-27 01:50:08 -07003053 return obj;
3054}
3055
3056/*
Christoph Lametere498be72005-09-09 13:03:32 -07003057 * An interface to enable slab creation on nodeid
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 */
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003059static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
Andrew Mortona737b3e2006-03-22 00:08:11 -08003060 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07003061{
Joonsoo Kim8456a642013-10-24 10:07:49 +09003062 struct page *page;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003063 struct kmem_cache_node *n;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003064 void *obj;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003065 int x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066
Paul Mackerras7c3fbbd2014-12-02 15:59:48 -08003067 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003068 n = get_node(cachep, nodeid);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003069 BUG_ON(!n);
Christoph Lametere498be72005-09-09 13:03:32 -07003070
Andrew Mortona737b3e2006-03-22 00:08:11 -08003071retry:
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08003072 check_irq_off();
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003073 spin_lock(&n->list_lock);
Geliang Tang7aa0d222016-01-14 15:18:02 -08003074 page = get_first_slab(n);
3075 if (!page)
3076 goto must_grow;
Christoph Lametere498be72005-09-09 13:03:32 -07003077
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003078 check_spinlock_acquired_node(cachep, nodeid);
Christoph Lametere498be72005-09-09 13:03:32 -07003079
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003080 STATS_INC_NODEALLOCS(cachep);
3081 STATS_INC_ACTIVE(cachep);
3082 STATS_SET_HIGH(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003083
Joonsoo Kim8456a642013-10-24 10:07:49 +09003084 BUG_ON(page->active == cachep->num);
Christoph Lametere498be72005-09-09 13:03:32 -07003085
Joonsoo Kim260b61d2016-03-15 14:54:12 -07003086 obj = slab_get_obj(cachep, page);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003087 n->free_objects--;
Christoph Lametere498be72005-09-09 13:03:32 -07003088
Joonsoo Kimd8410232016-03-15 14:54:44 -07003089 fixup_slab_list(cachep, n, page);
Christoph Lametere498be72005-09-09 13:03:32 -07003090
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003091 spin_unlock(&n->list_lock);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003092 goto done;
Christoph Lametere498be72005-09-09 13:03:32 -07003093
Andrew Mortona737b3e2006-03-22 00:08:11 -08003094must_grow:
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003095 spin_unlock(&n->list_lock);
David Rientjes4167e9b2015-04-14 15:46:55 -07003096 x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
Christoph Lameter765c4502006-09-27 01:50:08 -07003097 if (x)
3098 goto retry;
Christoph Lametere498be72005-09-09 13:03:32 -07003099
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003100 return fallback_alloc(cachep, flags);
Christoph Lameter765c4502006-09-27 01:50:08 -07003101
Andrew Mortona737b3e2006-03-22 00:08:11 -08003102done:
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003103 return obj;
Christoph Lametere498be72005-09-09 13:03:32 -07003104}
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003105
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003106static __always_inline void *
Ezequiel Garcia48356302012-09-08 17:47:57 -03003107slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003108 unsigned long caller)
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003109{
3110 unsigned long save_flags;
3111 void *ptr;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003112 int slab_node = numa_mem_id();
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003113
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003114 flags &= gfp_allowed_mask;
Jesper Dangaard Brouer011ecea2016-03-15 14:53:41 -07003115 cachep = slab_pre_alloc_hook(cachep, flags);
3116 if (unlikely(!cachep))
Akinobu Mita824ebef2007-05-06 14:49:58 -07003117 return NULL;
3118
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003119 cache_alloc_debugcheck_before(cachep, flags);
3120 local_irq_save(save_flags);
3121
Andrew Mortoneacbbae2011-07-28 13:59:49 -07003122 if (nodeid == NUMA_NO_NODE)
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003123 nodeid = slab_node;
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003124
Christoph Lameter18bf8542014-08-06 16:04:11 -07003125 if (unlikely(!get_node(cachep, nodeid))) {
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003126 /* Node not bootstrapped yet */
3127 ptr = fallback_alloc(cachep, flags);
3128 goto out;
3129 }
3130
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003131 if (nodeid == slab_node) {
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003132 /*
3133 * Use the locally cached objects if possible.
3134 * However ____cache_alloc does not allow fallback
3135 * to other nodes. It may fail while we still have
3136 * objects on other nodes available.
3137 */
3138 ptr = ____cache_alloc(cachep, flags);
3139 if (ptr)
3140 goto out;
3141 }
3142 /* ___cache_alloc_node can fall back to other nodes */
3143 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3144 out:
3145 local_irq_restore(save_flags);
3146 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3147
Jesper Dangaard Brouerd5e3ed62016-03-15 14:53:47 -07003148 if (unlikely(flags & __GFP_ZERO) && ptr)
3149 memset(ptr, 0, cachep->object_size);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003150
Jesper Dangaard Brouerd5e3ed62016-03-15 14:53:47 -07003151 slab_post_alloc_hook(cachep, flags, 1, &ptr);
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003152 return ptr;
3153}
3154
3155static __always_inline void *
3156__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3157{
3158 void *objp;
3159
Zefan Li2ad654b2014-09-25 09:41:02 +08003160 if (current->mempolicy || cpuset_do_slab_mem_spread()) {
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003161 objp = alternate_node_alloc(cache, flags);
3162 if (objp)
3163 goto out;
3164 }
3165 objp = ____cache_alloc(cache, flags);
3166
3167 /*
3168 * We may just have run out of memory on the local node.
3169 * ____cache_alloc_node() knows how to locate memory on other nodes
3170 */
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003171 if (!objp)
3172 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003173
3174 out:
3175 return objp;
3176}
3177#else
3178
3179static __always_inline void *
3180__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3181{
3182 return ____cache_alloc(cachep, flags);
3183}
3184
3185#endif /* CONFIG_NUMA */
3186
3187static __always_inline void *
Ezequiel Garcia48356302012-09-08 17:47:57 -03003188slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003189{
3190 unsigned long save_flags;
3191 void *objp;
3192
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003193 flags &= gfp_allowed_mask;
Jesper Dangaard Brouer011ecea2016-03-15 14:53:41 -07003194 cachep = slab_pre_alloc_hook(cachep, flags);
3195 if (unlikely(!cachep))
Akinobu Mita824ebef2007-05-06 14:49:58 -07003196 return NULL;
3197
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003198 cache_alloc_debugcheck_before(cachep, flags);
3199 local_irq_save(save_flags);
3200 objp = __do_cache_alloc(cachep, flags);
3201 local_irq_restore(save_flags);
3202 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3203 prefetchw(objp);
3204
Jesper Dangaard Brouerd5e3ed62016-03-15 14:53:47 -07003205 if (unlikely(flags & __GFP_ZERO) && objp)
3206 memset(objp, 0, cachep->object_size);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003207
Jesper Dangaard Brouerd5e3ed62016-03-15 14:53:47 -07003208 slab_post_alloc_hook(cachep, flags, 1, &objp);
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003209 return objp;
3210}
Christoph Lametere498be72005-09-09 13:03:32 -07003211
3212/*
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003213 * Caller needs to acquire the correct kmem_cache_node's list_lock.
Joonsoo Kim97654df2014-08-06 16:04:25 -07003214 * @list: List of detached free slabs, to be freed by the caller
Christoph Lametere498be72005-09-09 13:03:32 -07003215 */
Joonsoo Kim97654df2014-08-06 16:04:25 -07003216static void free_block(struct kmem_cache *cachep, void **objpp,
3217 int nr_objects, int node, struct list_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218{
3219 int i;
Joonsoo Kim25c063f2014-08-06 16:04:22 -07003220 struct kmem_cache_node *n = get_node(cachep, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221
3222 for (i = 0; i < nr_objects; i++) {
Mel Gorman072bb0a2012-07-31 16:43:58 -07003223 void *objp;
Joonsoo Kim8456a642013-10-24 10:07:49 +09003224 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225
Mel Gorman072bb0a2012-07-31 16:43:58 -07003226 clear_obj_pfmemalloc(&objpp[i]);
3227 objp = objpp[i];
3228
Joonsoo Kim8456a642013-10-24 10:07:49 +09003229 page = virt_to_head_page(objp);
Joonsoo Kim8456a642013-10-24 10:07:49 +09003230 list_del(&page->lru);
Christoph Lameterff694162005-09-22 21:44:02 -07003231 check_spinlock_acquired_node(cachep, node);
Joonsoo Kim260b61d2016-03-15 14:54:12 -07003232 slab_put_obj(cachep, page, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233 STATS_DEC_ACTIVE(cachep);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003234 n->free_objects++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235
3236 /* fixup slab chains */
Joonsoo Kim8456a642013-10-24 10:07:49 +09003237 if (page->active == 0) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003238 if (n->free_objects > n->free_limit) {
3239 n->free_objects -= cachep->num;
Joonsoo Kim97654df2014-08-06 16:04:25 -07003240 list_add_tail(&page->lru, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 } else {
Joonsoo Kim8456a642013-10-24 10:07:49 +09003242 list_add(&page->lru, &n->slabs_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 }
3244 } else {
3245 /* Unconditionally move a slab to the end of the
3246 * partial list on free - maximum time for the
3247 * other objects to be freed, too.
3248 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09003249 list_add_tail(&page->lru, &n->slabs_partial);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250 }
3251 }
3252}
3253
Pekka Enberg343e0d72006-02-01 03:05:50 -08003254static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255{
3256 int batchcount;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003257 struct kmem_cache_node *n;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003258 int node = numa_mem_id();
Joonsoo Kim97654df2014-08-06 16:04:25 -07003259 LIST_HEAD(list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260
3261 batchcount = ac->batchcount;
Joonsoo Kim260b61d2016-03-15 14:54:12 -07003262
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263 check_irq_off();
Christoph Lameter18bf8542014-08-06 16:04:11 -07003264 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003265 spin_lock(&n->list_lock);
3266 if (n->shared) {
3267 struct array_cache *shared_array = n->shared;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003268 int max = shared_array->limit - shared_array->avail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269 if (max) {
3270 if (batchcount > max)
3271 batchcount = max;
Christoph Lametere498be72005-09-09 13:03:32 -07003272 memcpy(&(shared_array->entry[shared_array->avail]),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003273 ac->entry, sizeof(void *) * batchcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 shared_array->avail += batchcount;
3275 goto free_done;
3276 }
3277 }
3278
Joonsoo Kim97654df2014-08-06 16:04:25 -07003279 free_block(cachep, ac->entry, batchcount, node, &list);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003280free_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281#if STATS
3282 {
3283 int i = 0;
Geliang Tang73c02192016-01-14 15:17:59 -08003284 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285
Geliang Tang73c02192016-01-14 15:17:59 -08003286 list_for_each_entry(page, &n->slabs_free, lru) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09003287 BUG_ON(page->active);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288
3289 i++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290 }
3291 STATS_SET_FREEABLE(cachep, i);
3292 }
3293#endif
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003294 spin_unlock(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07003295 slabs_destroy(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 ac->avail -= batchcount;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003297 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298}
3299
3300/*
Andrew Mortona737b3e2006-03-22 00:08:11 -08003301 * Release an obj back to its cache. If the obj has a constructed state, it must
3302 * be in this state _before_ it is released. Called with disabled ints.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 */
Suleiman Souhlala947eb92011-06-02 00:16:42 -07003304static inline void __cache_free(struct kmem_cache *cachep, void *objp,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003305 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306{
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003307 struct array_cache *ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308
3309 check_irq_off();
Catalin Marinasd5cff632009-06-11 13:22:40 +01003310 kmemleak_free_recursive(objp, cachep->flags);
Suleiman Souhlala947eb92011-06-02 00:16:42 -07003311 objp = cache_free_debugcheck(cachep, objp, caller);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003312
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003313 kmemcheck_slab_free(cachep, objp, cachep->object_size);
Pekka Enbergc175eea2008-05-09 20:35:53 +02003314
Siddha, Suresh B1807a1a2007-08-22 14:01:49 -07003315 /*
3316 * Skip calling cache_free_alien() when the platform is not NUMA.
3317 * This will avoid cache misses that happen while accessing slabp (which
3318 * is a per-page memory reference) to get the nodeid. Instead use a global
3319 * variable to skip the call, which is most likely to be present in
3320 * the cache.
3321 */
Mel Gormanb6e68bc2009-06-16 15:32:16 -07003322 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
Pekka Enberg729bd0b2006-06-23 02:03:05 -07003323 return;
Christoph Lametere498be72005-09-09 13:03:32 -07003324
Joonsoo Kim3d880192014-10-09 15:26:04 -07003325 if (ac->avail < ac->limit) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 STATS_INC_FREEHIT(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327 } else {
3328 STATS_INC_FREEMISS(cachep);
3329 cache_flusharray(cachep, ac);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 }
Zhao Jin42c8c992011-08-27 00:26:17 +08003331
Mel Gorman072bb0a2012-07-31 16:43:58 -07003332 ac_put_obj(cachep, ac, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333}
3334
3335/**
3336 * kmem_cache_alloc - Allocate an object
3337 * @cachep: The cache to allocate from.
3338 * @flags: See kmalloc().
3339 *
3340 * Allocate an object from this cache. The flags are only relevant
3341 * if the cache has no available objects.
3342 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003343void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344{
Ezequiel Garcia48356302012-09-08 17:47:57 -03003345 void *ret = slab_alloc(cachep, flags, _RET_IP_);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003346
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003347 trace_kmem_cache_alloc(_RET_IP_, ret,
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003348 cachep->object_size, cachep->size, flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003349
3350 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351}
3352EXPORT_SYMBOL(kmem_cache_alloc);
3353
Jesper Dangaard Brouer7b0501d2016-03-15 14:53:53 -07003354static __always_inline void
3355cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3356 size_t size, void **p, unsigned long caller)
3357{
3358 size_t i;
3359
3360 for (i = 0; i < size; i++)
3361 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3362}
3363
Jesper Dangaard Brouer865762a2015-11-20 15:57:58 -08003364int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003365 void **p)
Christoph Lameter484748f2015-09-04 15:45:34 -07003366{
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003367 size_t i;
3368
3369 s = slab_pre_alloc_hook(s, flags);
3370 if (!s)
3371 return 0;
3372
3373 cache_alloc_debugcheck_before(s, flags);
3374
3375 local_irq_disable();
3376 for (i = 0; i < size; i++) {
3377 void *objp = __do_cache_alloc(s, flags);
3378
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003379 if (unlikely(!objp))
3380 goto error;
3381 p[i] = objp;
3382 }
3383 local_irq_enable();
3384
Jesper Dangaard Brouer7b0501d2016-03-15 14:53:53 -07003385 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3386
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003387 /* Clear memory outside IRQ disabled section */
3388 if (unlikely(flags & __GFP_ZERO))
3389 for (i = 0; i < size; i++)
3390 memset(p[i], 0, s->object_size);
3391
3392 slab_post_alloc_hook(s, flags, size, p);
3393 /* FIXME: Trace call missing. Christoph would like a bulk variant */
3394 return size;
3395error:
3396 local_irq_enable();
Jesper Dangaard Brouer7b0501d2016-03-15 14:53:53 -07003397 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003398 slab_post_alloc_hook(s, flags, i, p);
3399 __kmem_cache_free_bulk(s, i, p);
3400 return 0;
Christoph Lameter484748f2015-09-04 15:45:34 -07003401}
3402EXPORT_SYMBOL(kmem_cache_alloc_bulk);
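
/*
 * Illustrative usage sketch (names hypothetical): the return value is the
 * number of objects placed in the array - with this implementation either
 * all of them or, on failure, 0:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs),
 *				  objs) != ARRAY_SIZE(objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 */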
3403
Li Zefan0f24f122009-12-11 15:45:30 +08003404#ifdef CONFIG_TRACING
Steven Rostedt85beb582010-11-24 16:23:34 -05003405void *
Ezequiel Garcia40521472012-09-08 17:47:56 -03003406kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003407{
Steven Rostedt85beb582010-11-24 16:23:34 -05003408 void *ret;
3409
Ezequiel Garcia48356302012-09-08 17:47:57 -03003410 ret = slab_alloc(cachep, flags, _RET_IP_);
Steven Rostedt85beb582010-11-24 16:23:34 -05003411
3412 trace_kmalloc(_RET_IP_, ret,
Ezequiel Garciaff4fcd02012-09-08 17:47:52 -03003413 size, cachep->size, flags);
Steven Rostedt85beb582010-11-24 16:23:34 -05003414 return ret;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003415}
Steven Rostedt85beb582010-11-24 16:23:34 -05003416EXPORT_SYMBOL(kmem_cache_alloc_trace);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003417#endif
3418
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419#ifdef CONFIG_NUMA
Zhouping Liud0d04b72013-05-16 11:36:23 +08003420/**
3421 * kmem_cache_alloc_node - Allocate an object on the specified node
3422 * @cachep: The cache to allocate from.
3423 * @flags: See kmalloc().
3424 * @nodeid: node number of the target node.
3425 *
3426 * Identical to kmem_cache_alloc but it will allocate memory on the given
3427 * node, which can improve the performance for cpu bound structures.
3428 *
3429 * Fallback to other nodes is possible if __GFP_THISNODE is not set.
3430 */
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003431void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3432{
Ezequiel Garcia48356302012-09-08 17:47:57 -03003433 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003434
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003435 trace_kmem_cache_alloc_node(_RET_IP_, ret,
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003436 cachep->object_size, cachep->size,
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003437 flags, nodeid);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003438
3439 return ret;
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003440}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441EXPORT_SYMBOL(kmem_cache_alloc_node);
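
/*
 * Illustrative usage sketch (foo_cache and dev are hypothetical): allocate
 * a structure on the node closest to a device; the allocation may fall
 * back to other nodes unless __GFP_THISNODE is passed:
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL,
 *					      dev_to_node(dev));
 */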
3442
Li Zefan0f24f122009-12-11 15:45:30 +08003443#ifdef CONFIG_TRACING
Ezequiel Garcia40521472012-09-08 17:47:56 -03003444void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
Steven Rostedt85beb582010-11-24 16:23:34 -05003445 gfp_t flags,
Ezequiel Garcia40521472012-09-08 17:47:56 -03003446 int nodeid,
3447 size_t size)
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003448{
Steven Rostedt85beb582010-11-24 16:23:34 -05003449 void *ret;
3450
Ezequiel Garcia592f4142012-09-25 08:07:08 -03003451 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003452
Steven Rostedt85beb582010-11-24 16:23:34 -05003453 trace_kmalloc_node(_RET_IP_, ret,
Ezequiel Garciaff4fcd02012-09-08 17:47:52 -03003454 size, cachep->size,
Steven Rostedt85beb582010-11-24 16:23:34 -05003455 flags, nodeid);
3456 return ret;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003457}
Steven Rostedt85beb582010-11-24 16:23:34 -05003458EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003459#endif
3460
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003461static __always_inline void *
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003462__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003463{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003464 struct kmem_cache *cachep;
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003465
Christoph Lameter2c59dd62013-01-10 19:14:19 +00003466 cachep = kmalloc_slab(size, flags);
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003467 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3468 return cachep;
Ezequiel Garcia40521472012-09-08 17:47:56 -03003469 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003470}
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003471
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003472void *__kmalloc_node(size_t size, gfp_t flags, int node)
3473{
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003474 return __do_kmalloc_node(size, flags, node, _RET_IP_);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003475}
Christoph Hellwigdbe5e692006-09-25 23:31:36 -07003476EXPORT_SYMBOL(__kmalloc_node);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003477
3478void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003479 int node, unsigned long caller)
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003480{
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003481 return __do_kmalloc_node(size, flags, node, caller);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003482}
3483EXPORT_SYMBOL(__kmalloc_node_track_caller);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003484#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485
3486/**
Paul Drynoff800590f2006-06-23 02:03:48 -07003487 * __do_kmalloc - allocate memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488 * @size: how many bytes of memory are required.
Paul Drynoff800590f2006-06-23 02:03:48 -07003489 * @flags: the type of memory to allocate (see kmalloc).
Randy Dunlap911851e2006-03-22 00:08:14 -08003490 * @caller: return address of the caller, used for debug tracking
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 */
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003492static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003493 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003494{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003495 struct kmem_cache *cachep;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003496 void *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497
Christoph Lameter2c59dd62013-01-10 19:14:19 +00003498 cachep = kmalloc_slab(size, flags);
Linus Torvaldsa5c96d82007-07-19 13:17:15 -07003499 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3500 return cachep;
Ezequiel Garcia48356302012-09-08 17:47:57 -03003501 ret = slab_alloc(cachep, flags, caller);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003502
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003503 trace_kmalloc(caller, ret,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003504 size, cachep->size, flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003505
3506 return ret;
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003507}
3508
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003509void *__kmalloc(size_t size, gfp_t flags)
3510{
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003511 return __do_kmalloc(size, flags, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512}
3513EXPORT_SYMBOL(__kmalloc);
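
/*
 * Illustrative note: kmalloc() calls whose size is not a compile-time
 * constant typically end up here.  A minimal usage sketch (len is
 * hypothetical, error handling shortened):
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */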
3514
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003515void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003516{
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003517 return __do_kmalloc(size, flags, caller);
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003518}
3519EXPORT_SYMBOL(__kmalloc_track_caller);
Christoph Hellwig1d2c8ee2006-10-04 02:15:25 -07003520
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521/**
3522 * kmem_cache_free - Deallocate an object
3523 * @cachep: The cache the allocation was from.
3524 * @objp: The previously allocated object.
3525 *
3526 * Free an object which was previously allocated from this
3527 * cache.
3528 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003529void kmem_cache_free(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530{
3531 unsigned long flags;
Glauber Costab9ce5ef2012-12-18 14:22:46 -08003532 cachep = cache_from_obj(cachep, objp);
3533 if (!cachep)
3534 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535
3536 local_irq_save(flags);
Feng Tangd97d4762012-07-02 14:29:10 +08003537 debug_check_no_locks_freed(objp, cachep->object_size);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07003538 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003539 debug_check_no_obj_freed(objp, cachep->object_size);
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003540 __cache_free(cachep, objp, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541 local_irq_restore(flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003542
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003543 trace_kmem_cache_free(_RET_IP_, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544}
3545EXPORT_SYMBOL(kmem_cache_free);
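
/*
 * Illustrative usage sketch (names hypothetical): objects must be returned
 * to the cache they were allocated from:
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 */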
3546
Jesper Dangaard Brouere6cdb582016-03-15 14:53:56 -07003547void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3548{
3549 struct kmem_cache *s;
3550 size_t i;
3551
3552 local_irq_disable();
3553 for (i = 0; i < size; i++) {
3554 void *objp = p[i];
3555
Jesper Dangaard Brouerca257192016-03-15 14:54:00 -07003556 if (!orig_s) /* called via kfree_bulk */
3557 s = virt_to_cache(objp);
3558 else
3559 s = cache_from_obj(orig_s, objp);
Jesper Dangaard Brouere6cdb582016-03-15 14:53:56 -07003560
3561 debug_check_no_locks_freed(objp, s->object_size);
3562 if (!(s->flags & SLAB_DEBUG_OBJECTS))
3563 debug_check_no_obj_freed(objp, s->object_size);
3564
3565 __cache_free(s, objp, _RET_IP_);
3566 }
3567 local_irq_enable();
3568
3569 /* FIXME: add tracing */
3570}
3571EXPORT_SYMBOL(kmem_cache_free_bulk);
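
/*
 * Illustrative note: kfree_bulk() reaches this function with orig_s == NULL,
 * so each object's cache is looked up via virt_to_cache().  A sketch
 * (error handling omitted, sizes arbitrary):
 *
 *	void *ptrs[2];
 *
 *	ptrs[0] = kmalloc(32, GFP_KERNEL);
 *	ptrs[1] = kmalloc(64, GFP_KERNEL);
 *	kfree_bulk(ARRAY_SIZE(ptrs), ptrs);
 */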
3572
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574 * kfree - free previously allocated memory
3575 * @objp: pointer returned by kmalloc.
3576 *
Pekka Enberg80e93ef2005-09-09 13:10:16 -07003577 * If @objp is NULL, no operation is performed.
3578 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579 * Don't free memory not originally allocated by kmalloc()
3580 * or you will run into trouble.
3581 */
3582void kfree(const void *objp)
3583{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003584 struct kmem_cache *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 unsigned long flags;
3586
Pekka Enberg2121db72009-03-25 11:05:57 +02003587 trace_kfree(_RET_IP_, objp);
3588
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003589 if (unlikely(ZERO_OR_NULL_PTR(objp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 return;
3591 local_irq_save(flags);
3592 kfree_debugcheck(objp);
Pekka Enberg6ed5eb2212006-02-01 03:05:49 -08003593 c = virt_to_cache(objp);
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003594 debug_check_no_locks_freed(objp, c->object_size);
3595
3596 debug_check_no_obj_freed(objp, c->object_size);
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003597 __cache_free(c, (void *)objp, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 local_irq_restore(flags);
3599}
3600EXPORT_SYMBOL(kfree);
3601
Christoph Lametere498be72005-09-09 13:03:32 -07003602/*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003603 * This initializes kmem_cache_node or resizes various caches for all nodes.
Christoph Lametere498be72005-09-09 13:03:32 -07003604 */
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003605static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
Christoph Lametere498be72005-09-09 13:03:32 -07003606{
3607 int node;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003608 struct kmem_cache_node *n;
Christoph Lametercafeb022006-03-25 03:06:46 -08003609 struct array_cache *new_shared;
Joonsoo Kimc8522a32014-08-06 16:04:29 -07003610 struct alien_cache **new_alien = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07003611
Mel Gorman9c09a952008-01-24 05:49:54 -08003612 for_each_online_node(node) {
Christoph Lametercafeb022006-03-25 03:06:46 -08003613
LQYMGTb455def2014-12-10 15:42:13 -08003614 if (use_alien_caches) {
3615 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3616 if (!new_alien)
3617 goto fail;
3618 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003619
Eric Dumazet63109842007-05-06 14:49:28 -07003620 new_shared = NULL;
3621 if (cachep->shared) {
3622 new_shared = alloc_arraycache(node,
Christoph Lameter0718dc22006-03-25 03:06:47 -08003623 cachep->shared*cachep->batchcount,
Pekka Enberg83b519e2009-06-10 19:40:04 +03003624 0xbaadf00d, gfp);
Eric Dumazet63109842007-05-06 14:49:28 -07003625 if (!new_shared) {
3626 free_alien_cache(new_alien);
3627 goto fail;
3628 }
Christoph Lameter0718dc22006-03-25 03:06:47 -08003629 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003630
Christoph Lameter18bf8542014-08-06 16:04:11 -07003631 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003632 if (n) {
3633 struct array_cache *shared = n->shared;
Joonsoo Kim97654df2014-08-06 16:04:25 -07003634 LIST_HEAD(list);
Christoph Lametercafeb022006-03-25 03:06:46 -08003635
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003636 spin_lock_irq(&n->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07003637
Christoph Lametercafeb022006-03-25 03:06:46 -08003638 if (shared)
Christoph Lameter0718dc22006-03-25 03:06:47 -08003639 free_block(cachep, shared->entry,
Joonsoo Kim97654df2014-08-06 16:04:25 -07003640 shared->avail, node, &list);
Christoph Lametere498be72005-09-09 13:03:32 -07003641
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003642 n->shared = new_shared;
3643 if (!n->alien) {
3644 n->alien = new_alien;
Christoph Lametere498be72005-09-09 13:03:32 -07003645 new_alien = NULL;
3646 }
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003647 n->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003648 cachep->batchcount + cachep->num;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003649 spin_unlock_irq(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07003650 slabs_destroy(cachep, &list);
Christoph Lametercafeb022006-03-25 03:06:46 -08003651 kfree(shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003652 free_alien_cache(new_alien);
3653 continue;
3654 }
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003655 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3656 if (!n) {
Christoph Lameter0718dc22006-03-25 03:06:47 -08003657 free_alien_cache(new_alien);
3658 kfree(new_shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003659 goto fail;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003660 }
Christoph Lametere498be72005-09-09 13:03:32 -07003661
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003662 kmem_cache_node_init(n);
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003663 n->next_reap = jiffies + REAPTIMEOUT_NODE +
3664 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003665 n->shared = new_shared;
3666 n->alien = new_alien;
3667 n->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003668 cachep->batchcount + cachep->num;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003669 cachep->node[node] = n;
Christoph Lametere498be72005-09-09 13:03:32 -07003670 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003671 return 0;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003672
Andrew Mortona737b3e2006-03-22 00:08:11 -08003673fail:
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003674 if (!cachep->list.next) {
Christoph Lameter0718dc22006-03-25 03:06:47 -08003675 /* Cache is not active yet. Roll back what we did */
3676 node--;
3677 while (node >= 0) {
Christoph Lameter18bf8542014-08-06 16:04:11 -07003678 n = get_node(cachep, node);
3679 if (n) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003680 kfree(n->shared);
3681 free_alien_cache(n->alien);
3682 kfree(n);
Christoph Lameter6a673682013-01-10 19:14:19 +00003683 cachep->node[node] = NULL;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003684 }
3685 node--;
3686 }
3687 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003688 return -ENOMEM;
Christoph Lametere498be72005-09-09 13:03:32 -07003689}
3690
Christoph Lameter18004c52012-07-06 15:25:12 -05003691/* Always called with the slab_mutex held */
Glauber Costa943a4512012-12-18 14:23:03 -08003692static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
Pekka Enberg83b519e2009-06-10 19:40:04 +03003693 int batchcount, int shared, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694{
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003695 struct array_cache __percpu *cpu_cache, *prev;
3696 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003698 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3699 if (!cpu_cache)
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003700 return -ENOMEM;
3701
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003702 prev = cachep->cpu_cache;
3703 cachep->cpu_cache = cpu_cache;
3704 kick_all_cpus_sync();
Christoph Lametere498be72005-09-09 13:03:32 -07003705
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706 check_irq_on();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707 cachep->batchcount = batchcount;
3708 cachep->limit = limit;
Christoph Lametere498be72005-09-09 13:03:32 -07003709 cachep->shared = shared;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003711 if (!prev)
3712 goto alloc_node;
3713
3714 for_each_online_cpu(cpu) {
Joonsoo Kim97654df2014-08-06 16:04:25 -07003715 LIST_HEAD(list);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003716 int node;
3717 struct kmem_cache_node *n;
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003718 struct array_cache *ac = per_cpu_ptr(prev, cpu);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003719
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003720 node = cpu_to_mem(cpu);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003721 n = get_node(cachep, node);
3722 spin_lock_irq(&n->list_lock);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003723 free_block(cachep, ac->entry, ac->avail, node, &list);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003724 spin_unlock_irq(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07003725 slabs_destroy(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726 }
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003727 free_percpu(prev);
3728
3729alloc_node:
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003730 return alloc_kmem_cache_node(cachep, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731}
3732
Glauber Costa943a4512012-12-18 14:23:03 -08003733static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3734 int batchcount, int shared, gfp_t gfp)
3735{
3736 int ret;
Vladimir Davydov426589f2015-02-12 14:59:23 -08003737 struct kmem_cache *c;
Glauber Costa943a4512012-12-18 14:23:03 -08003738
3739 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3740
3741 if (slab_state < FULL)
3742 return ret;
3743
3744 if ((ret < 0) || !is_root_cache(cachep))
3745 return ret;
3746
Vladimir Davydov426589f2015-02-12 14:59:23 -08003747 lockdep_assert_held(&slab_mutex);
3748 for_each_memcg_cache(c, cachep) {
3749 /* return value determined by the root cache only */
3750 __do_tune_cpucache(c, limit, batchcount, shared, gfp);
Glauber Costa943a4512012-12-18 14:23:03 -08003751 }
3752
3753 return ret;
3754}
3755
Christoph Lameter18004c52012-07-06 15:25:12 -05003756/* Called with slab_mutex held always */
Pekka Enberg83b519e2009-06-10 19:40:04 +03003757static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758{
3759 int err;
Glauber Costa943a4512012-12-18 14:23:03 -08003760 int limit = 0;
3761 int shared = 0;
3762 int batchcount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763
Glauber Costa943a4512012-12-18 14:23:03 -08003764 if (!is_root_cache(cachep)) {
3765 struct kmem_cache *root = memcg_root_cache(cachep);
3766 limit = root->limit;
3767 shared = root->shared;
3768 batchcount = root->batchcount;
3769 }
3770
3771 if (limit && shared && batchcount)
3772 goto skip_setup;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003773 /*
3774 * The head array serves three purposes:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003775 * - create a LIFO ordering, i.e. return objects that are cache-warm
3776 * - reduce the number of spinlock operations.
Andrew Mortona737b3e2006-03-22 00:08:11 -08003777 * - reduce the number of linked list operations on the slab and
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778 * bufctl chains: array operations are cheaper.
3779 * The numbers are guessed, we should auto-tune as described by
3780 * Bonwick.
3781 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003782 if (cachep->size > 131072)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 limit = 1;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003784 else if (cachep->size > PAGE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785 limit = 8;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003786 else if (cachep->size > 1024)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787 limit = 24;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003788 else if (cachep->size > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789 limit = 54;
3790 else
3791 limit = 120;
3792
Andrew Mortona737b3e2006-03-22 00:08:11 -08003793 /*
3794 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795 * allocation behaviour: Most allocs on one cpu, most free operations
3796 * on another cpu. For these cases, an efficient object passing between
3797 * cpus is necessary. This is provided by a shared array. The array
3798 * replaces Bonwick's magazine layer.
3799 * On uniprocessor, it's functionally equivalent (but less efficient)
3800 * to a larger limit. Thus disabled by default.
3801 */
3802 shared = 0;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003803 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804 shared = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805
3806#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08003807 /*
3808 * With debugging enabled, a large batchcount leads to excessively long
3809 * periods with local interrupts disabled. Limit the batchcount.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810 */
3811 if (limit > 32)
3812 limit = 32;
3813#endif
Glauber Costa943a4512012-12-18 14:23:03 -08003814 batchcount = (limit + 1) / 2;
3815skip_setup:
3816 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817 if (err)
3818 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003819 cachep->name, -err);
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07003820 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821}
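
/*
 * Worked example of the heuristic above (illustrative only): a cache with
 * 512-byte objects falls into the "> 256" bucket, so limit = 54,
 * batchcount = (54 + 1) / 2 = 27, and shared = 8 on SMP because the
 * object size is <= PAGE_SIZE.  With DEBUG, limit is first capped to 32,
 * giving batchcount = 16.
 */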
3822
Christoph Lameter1b552532006-03-22 00:09:07 -08003823/*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003824 * Drain an array if it contains any elements, taking the node lock only if
3825 * necessary. Note that the node listlock also protects the array_cache
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003826 * if drain_array() is used on the shared array.
Christoph Lameter1b552532006-03-22 00:09:07 -08003827 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003828static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
Christoph Lameter1b552532006-03-22 00:09:07 -08003829 struct array_cache *ac, int force, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830{
Joonsoo Kim97654df2014-08-06 16:04:25 -07003831 LIST_HEAD(list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 int tofree;
3833
Christoph Lameter1b552532006-03-22 00:09:07 -08003834 if (!ac || !ac->avail)
3835 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836 if (ac->touched && !force) {
3837 ac->touched = 0;
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003838 } else {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003839 spin_lock_irq(&n->list_lock);
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003840 if (ac->avail) {
3841 tofree = force ? ac->avail : (ac->limit + 4) / 5;
3842 if (tofree > ac->avail)
3843 tofree = (ac->avail + 1) / 2;
Joonsoo Kim97654df2014-08-06 16:04:25 -07003844 free_block(cachep, ac->entry, tofree, node, &list);
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003845 ac->avail -= tofree;
3846 memmove(ac->entry, &(ac->entry[tofree]),
3847 sizeof(void *) * ac->avail);
3848 }
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003849 spin_unlock_irq(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07003850 slabs_destroy(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851 }
3852}
3853
3854/**
3855 * cache_reap - Reclaim memory from caches.
Randy Dunlap05fb6bf2007-02-28 20:12:13 -08003856 * @w: work descriptor
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857 *
3858 * Called from workqueue/eventd every few seconds.
3859 * Purpose:
3860 * - clear the per-cpu caches for this CPU.
3861 * - return freeable pages to the main free memory pool.
3862 *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003863 * If we cannot acquire the cache chain mutex then just give up - we'll try
3864 * again on the next iteration.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865 */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08003866static void cache_reap(struct work_struct *w)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867{
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07003868 struct kmem_cache *searchp;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003869 struct kmem_cache_node *n;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003870 int node = numa_mem_id();
Jean Delvarebf6aede2009-04-02 16:56:54 -07003871 struct delayed_work *work = to_delayed_work(w);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872
Christoph Lameter18004c52012-07-06 15:25:12 -05003873 if (!mutex_trylock(&slab_mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874 /* Give up. Setup the next iteration. */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08003875 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876
Christoph Lameter18004c52012-07-06 15:25:12 -05003877 list_for_each_entry(searchp, &slab_caches, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878 check_irq_on();
3879
Christoph Lameter35386e32006-03-22 00:09:05 -08003880 /*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003881 * We only take the node lock if absolutely necessary and we
Christoph Lameter35386e32006-03-22 00:09:05 -08003882 * have established with reasonable certainty that
3883 * we can do some work if the lock was obtained.
3884 */
Christoph Lameter18bf8542014-08-06 16:04:11 -07003885 n = get_node(searchp, node);
Christoph Lameter35386e32006-03-22 00:09:05 -08003886
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003887 reap_alien(searchp, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003889 drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890
Christoph Lameter35386e32006-03-22 00:09:05 -08003891 /*
3892 * These are racy checks but it does not matter
3893 * if we skip one check or scan twice.
3894 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003895 if (time_after(n->next_reap, jiffies))
Christoph Lameter35386e32006-03-22 00:09:05 -08003896 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003898 n->next_reap = jiffies + REAPTIMEOUT_NODE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003900 drain_array(searchp, n, n->shared, 0, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003902 if (n->free_touched)
3903 n->free_touched = 0;
Christoph Lametered11d9e2006-06-30 01:55:45 -07003904 else {
3905 int freed;
3906
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003907 freed = drain_freelist(searchp, n, (n->free_limit +
Christoph Lametered11d9e2006-06-30 01:55:45 -07003908 5 * searchp->num - 1) / (5 * searchp->num));
3909 STATS_ADD_REAPED(searchp, freed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 }
Christoph Lameter35386e32006-03-22 00:09:05 -08003911next:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912 cond_resched();
3913 }
3914 check_irq_on();
Christoph Lameter18004c52012-07-06 15:25:12 -05003915 mutex_unlock(&slab_mutex);
Christoph Lameter8fce4d82006-03-09 17:33:54 -08003916 next_reap_node();
Christoph Lameter7c5cae32007-02-10 01:42:55 -08003917out:
Andrew Mortona737b3e2006-03-22 00:08:11 -08003918 /* Set up the next iteration */
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003919 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920}
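
/*
 * Worked example of the drain target above (illustrative only): the number
 * of slabs freed per pass is ceil(free_limit / (5 * num)), i.e. roughly a
 * fifth of free_limit worth of objects rounded up to whole slabs.  With
 * free_limit = 120 and num = 30 objects per slab this is
 * (120 + 150 - 1) / 150 = 1 slab per pass, so free slabs are reclaimed
 * gradually rather than all at once.
 */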
3921
Linus Torvalds158a9622008-01-02 13:04:48 -08003922#ifdef CONFIG_SLABINFO
Glauber Costa0d7561c2012-10-19 18:20:27 +04003923void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924{
Joonsoo Kim8456a642013-10-24 10:07:49 +09003925 struct page *page;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003926 unsigned long active_objs;
3927 unsigned long num_objs;
3928 unsigned long active_slabs = 0;
3929 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
Christoph Lametere498be72005-09-09 13:03:32 -07003930 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 char *error = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07003932 int node;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003933 struct kmem_cache_node *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 active_objs = 0;
3936 num_slabs = 0;
Christoph Lameter18bf8542014-08-06 16:04:11 -07003937 for_each_kmem_cache_node(cachep, node, n) {
Christoph Lametere498be72005-09-09 13:03:32 -07003938
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08003939 check_irq_on();
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003940 spin_lock_irq(&n->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07003941
Joonsoo Kim8456a642013-10-24 10:07:49 +09003942 list_for_each_entry(page, &n->slabs_full, lru) {
3943 if (page->active != cachep->num && !error)
Christoph Lametere498be72005-09-09 13:03:32 -07003944 error = "slabs_full accounting error";
3945 active_objs += cachep->num;
3946 active_slabs++;
3947 }
Joonsoo Kim8456a642013-10-24 10:07:49 +09003948 list_for_each_entry(page, &n->slabs_partial, lru) {
3949 if (page->active == cachep->num && !error)
Joonsoo Kim106a74e2013-10-24 10:07:48 +09003950 error = "slabs_partial accounting error";
Joonsoo Kim8456a642013-10-24 10:07:49 +09003951 if (!page->active && !error)
Joonsoo Kim106a74e2013-10-24 10:07:48 +09003952 error = "slabs_partial accounting error";
Joonsoo Kim8456a642013-10-24 10:07:49 +09003953 active_objs += page->active;
Christoph Lametere498be72005-09-09 13:03:32 -07003954 active_slabs++;
3955 }
Joonsoo Kim8456a642013-10-24 10:07:49 +09003956 list_for_each_entry(page, &n->slabs_free, lru) {
3957 if (page->active && !error)
Joonsoo Kim106a74e2013-10-24 10:07:48 +09003958 error = "slabs_free accounting error";
Christoph Lametere498be72005-09-09 13:03:32 -07003959 num_slabs++;
3960 }
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003961 free_objects += n->free_objects;
3962 if (n->shared)
3963 shared_avail += n->shared->avail;
Christoph Lametere498be72005-09-09 13:03:32 -07003964
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003965 spin_unlock_irq(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003967 num_slabs += active_slabs;
3968 num_objs = num_slabs * cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07003969 if (num_objs - active_objs != free_objects && !error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970 error = "free_objects accounting error";
3971
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003972 name = cachep->name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 if (error)
3974 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
3975
Glauber Costa0d7561c2012-10-19 18:20:27 +04003976 sinfo->active_objs = active_objs;
3977 sinfo->num_objs = num_objs;
3978 sinfo->active_slabs = active_slabs;
3979 sinfo->num_slabs = num_slabs;
3980 sinfo->shared_avail = shared_avail;
3981 sinfo->limit = cachep->limit;
3982 sinfo->batchcount = cachep->batchcount;
3983 sinfo->shared = cachep->shared;
3984 sinfo->objects_per_slab = cachep->num;
3985 sinfo->cache_order = cachep->gfporder;
3986}
3987
3988void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
3989{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990#if STATS
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003991 { /* node stats */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 unsigned long high = cachep->high_mark;
3993 unsigned long allocs = cachep->num_allocations;
3994 unsigned long grown = cachep->grown;
3995 unsigned long reaped = cachep->reaped;
3996 unsigned long errors = cachep->errors;
3997 unsigned long max_freeable = cachep->max_freeable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998 unsigned long node_allocs = cachep->node_allocs;
Christoph Lametere498be72005-09-09 13:03:32 -07003999 unsigned long node_frees = cachep->node_frees;
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004000 unsigned long overflows = cachep->node_overflow;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001
Joe Perchese92dd4f2010-03-26 19:27:58 -07004002 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4003 "%4lu %4lu %4lu %4lu %4lu",
4004 allocs, high, grown,
4005 reaped, errors, max_freeable, node_allocs,
4006 node_frees, overflows);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 }
4008 /* cpu stats */
4009 {
4010 unsigned long allochit = atomic_read(&cachep->allochit);
4011 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4012 unsigned long freehit = atomic_read(&cachep->freehit);
4013 unsigned long freemiss = atomic_read(&cachep->freemiss);
4014
4015 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004016 allochit, allocmiss, freehit, freemiss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017 }
4018#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019}
4020
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021#define MAX_SLABINFO_WRITE 128
4022/**
4023 * slabinfo_write - Tuning for the slab allocator
4024 * @file: unused
4025 * @buffer: user buffer
4026 * @count: data length
4027 * @ppos: unused
4028 */
Glauber Costab7454ad2012-10-19 18:20:25 +04004029ssize_t slabinfo_write(struct file *file, const char __user *buffer,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004030 size_t count, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004032 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 int limit, batchcount, shared, res;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004034 struct kmem_cache *cachep;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004035
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036 if (count > MAX_SLABINFO_WRITE)
4037 return -EINVAL;
4038 if (copy_from_user(&kbuf, buffer, count))
4039 return -EFAULT;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004040 kbuf[MAX_SLABINFO_WRITE] = '\0';
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041
4042 tmp = strchr(kbuf, ' ');
4043 if (!tmp)
4044 return -EINVAL;
4045 *tmp = '\0';
4046 tmp++;
4047 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4048 return -EINVAL;
4049
4050 /* Find the cache in the chain of caches. */
Christoph Lameter18004c52012-07-06 15:25:12 -05004051 mutex_lock(&slab_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 res = -EINVAL;
Christoph Lameter18004c52012-07-06 15:25:12 -05004053 list_for_each_entry(cachep, &slab_caches, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054 if (!strcmp(cachep->name, kbuf)) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08004055 if (limit < 1 || batchcount < 1 ||
4056 batchcount > limit || shared < 0) {
Christoph Lametere498be72005-09-09 13:03:32 -07004057 res = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058 } else {
Christoph Lametere498be72005-09-09 13:03:32 -07004059 res = do_tune_cpucache(cachep, limit,
Pekka Enberg83b519e2009-06-10 19:40:04 +03004060 batchcount, shared,
4061 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062 }
4063 break;
4064 }
4065 }
Christoph Lameter18004c52012-07-06 15:25:12 -05004066 mutex_unlock(&slab_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 if (res >= 0)
4068 res = count;
4069 return res;
4070}
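
/*
 * Illustrative usage sketch: the tunables are changed by writing
 * "<cache-name> <limit> <batchcount> <shared>" to /proc/slabinfo, e.g.
 * (the values are examples, not recommendations):
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 */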
Al Viro871751e2006-03-25 03:06:39 -08004071
4072#ifdef CONFIG_DEBUG_SLAB_LEAK
4073
Al Viro871751e2006-03-25 03:06:39 -08004074static inline int add_caller(unsigned long *n, unsigned long v)
4075{
4076 unsigned long *p;
4077 int l;
4078 if (!v)
4079 return 1;
4080 l = n[1];
4081 p = n + 2;
4082 while (l) {
4083 int i = l/2;
4084 unsigned long *q = p + 2 * i;
4085 if (*q == v) {
4086 q[1]++;
4087 return 1;
4088 }
4089 if (*q > v) {
4090 l = i;
4091 } else {
4092 p = q + 2;
4093 l -= i + 1;
4094 }
4095 }
4096 if (++n[1] == n[0])
4097 return 0;
4098 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4099 p[0] = v;
4100 p[1] = 1;
4101 return 1;
4102}
4103
Joonsoo Kim8456a642013-10-24 10:07:49 +09004104static void handle_slab(unsigned long *n, struct kmem_cache *c,
4105 struct page *page)
Al Viro871751e2006-03-25 03:06:39 -08004106{
4107 void *p;
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004108 int i, j;
4109 unsigned long v;
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09004110
Al Viro871751e2006-03-25 03:06:39 -08004111 if (n[0] == n[1])
4112 return;
Joonsoo Kim8456a642013-10-24 10:07:49 +09004113 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004114 bool active = true;
4115
4116 for (j = page->active; j < c->num; j++) {
4117 if (get_free_obj(page, j) == i) {
4118 active = false;
4119 break;
4120 }
4121 }
4122
4123 if (!active)
Al Viro871751e2006-03-25 03:06:39 -08004124 continue;
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09004125
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004126 /*
4127 * probe_kernel_read() is used for DEBUG_PAGEALLOC. The page table
4128 * mapping is only established at actual object allocation, so we
4129 * could otherwise mistakenly access an unmapped object in the cpu
4130 * cache.
4131 */
4132 if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
4133 continue;
4134
4135 if (!add_caller(n, v))
Al Viro871751e2006-03-25 03:06:39 -08004136 return;
4137 }
4138}
4139
4140static void show_symbol(struct seq_file *m, unsigned long address)
4141{
4142#ifdef CONFIG_KALLSYMS
Al Viro871751e2006-03-25 03:06:39 -08004143 unsigned long offset, size;
Tejun Heo9281ace2007-07-17 04:03:51 -07004144 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
Al Viro871751e2006-03-25 03:06:39 -08004145
Alexey Dobriyana5c43da2007-05-08 00:28:47 -07004146 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
Al Viro871751e2006-03-25 03:06:39 -08004147 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
Alexey Dobriyana5c43da2007-05-08 00:28:47 -07004148 if (modname[0])
Al Viro871751e2006-03-25 03:06:39 -08004149 seq_printf(m, " [%s]", modname);
4150 return;
4151 }
4152#endif
4153 seq_printf(m, "%p", (void *)address);
4154}
4155
4156static int leaks_show(struct seq_file *m, void *p)
4157{
Thierry Reding0672aa72012-06-22 19:42:49 +02004158 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
Joonsoo Kim8456a642013-10-24 10:07:49 +09004159 struct page *page;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00004160 struct kmem_cache_node *n;
Al Viro871751e2006-03-25 03:06:39 -08004161 const char *name;
Christoph Lameterdb845062013-02-05 18:45:23 +00004162 unsigned long *x = m->private;
Al Viro871751e2006-03-25 03:06:39 -08004163 int node;
4164 int i;
4165
4166 if (!(cachep->flags & SLAB_STORE_USER))
4167 return 0;
4168 if (!(cachep->flags & SLAB_RED_ZONE))
4169 return 0;
4170
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004171 /*
4172 * Set store_user_clean and start to grab stored user information
4173 * for all objects on this cache. If some alloc/free requests come in
4174 * during the processing, the information would be wrong, so restart
4175 * the whole processing.
4176 */
4177 do {
4178 set_store_user_clean(cachep);
4179 drain_cpu_caches(cachep);
Al Viro871751e2006-03-25 03:06:39 -08004180
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004181 x[1] = 0;
Al Viro871751e2006-03-25 03:06:39 -08004182
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004183 for_each_kmem_cache_node(cachep, node, n) {
Al Viro871751e2006-03-25 03:06:39 -08004184
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004185 check_irq_on();
4186 spin_lock_irq(&n->list_lock);
Al Viro871751e2006-03-25 03:06:39 -08004187
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004188 list_for_each_entry(page, &n->slabs_full, lru)
4189 handle_slab(x, cachep, page);
4190 list_for_each_entry(page, &n->slabs_partial, lru)
4191 handle_slab(x, cachep, page);
4192 spin_unlock_irq(&n->list_lock);
4193 }
4194 } while (!is_store_user_clean(cachep));
4195
Al Viro871751e2006-03-25 03:06:39 -08004196 name = cachep->name;
Christoph Lameterdb845062013-02-05 18:45:23 +00004197 if (x[0] == x[1]) {
Al Viro871751e2006-03-25 03:06:39 -08004198 /* Increase the buffer size */
Christoph Lameter18004c52012-07-06 15:25:12 -05004199 mutex_unlock(&slab_mutex);
Christoph Lameterdb845062013-02-05 18:45:23 +00004200 m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
Al Viro871751e2006-03-25 03:06:39 -08004201 if (!m->private) {
4202 /* Too bad, we are really out */
Christoph Lameterdb845062013-02-05 18:45:23 +00004203 m->private = x;
Christoph Lameter18004c52012-07-06 15:25:12 -05004204 mutex_lock(&slab_mutex);
Al Viro871751e2006-03-25 03:06:39 -08004205 return -ENOMEM;
4206 }
Christoph Lameterdb845062013-02-05 18:45:23 +00004207 *(unsigned long *)m->private = x[0] * 2;
4208 kfree(x);
Christoph Lameter18004c52012-07-06 15:25:12 -05004209 mutex_lock(&slab_mutex);
Al Viro871751e2006-03-25 03:06:39 -08004210 /* Now make sure this entry will be retried */
4211 m->count = m->size;
4212 return 0;
4213 }
Christoph Lameterdb845062013-02-05 18:45:23 +00004214 for (i = 0; i < x[1]; i++) {
4215 seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4216 show_symbol(m, x[2*i+2]);
Al Viro871751e2006-03-25 03:06:39 -08004217 seq_putc(m, '\n');
4218 }
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07004219
Al Viro871751e2006-03-25 03:06:39 -08004220 return 0;
4221}
4222
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +04004223static const struct seq_operations slabstats_op = {
Vladimir Davydov1df3b262014-12-10 15:42:16 -08004224 .start = slab_start,
Wanpeng Li276a2432013-07-08 08:08:28 +08004225 .next = slab_next,
4226 .stop = slab_stop,
Al Viro871751e2006-03-25 03:06:39 -08004227 .show = leaks_show,
4228};
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +04004229
4230static int slabstats_open(struct inode *inode, struct file *file)
4231{
Rob Jonesb208ce32014-10-09 15:28:03 -07004232 unsigned long *n;
4233
4234 n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4235 if (!n)
4236 return -ENOMEM;
4237
4238 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4239
4240 return 0;
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +04004241}
4242
4243static const struct file_operations proc_slabstats_operations = {
4244 .open = slabstats_open,
4245 .read = seq_read,
4246 .llseek = seq_lseek,
4247 .release = seq_release_private,
4248};
Al Viro871751e2006-03-25 03:06:39 -08004249#endif
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +04004250
4251static int __init slab_proc_init(void)
4252{
4253#ifdef CONFIG_DEBUG_SLAB_LEAK
4254 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4255#endif
4256 return 0;
4257}
4258module_init(slab_proc_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259#endif
4260
Manfred Spraul00e145b2005-09-03 15:55:07 -07004261/**
4262 * ksize - get the actual amount of memory allocated for a given object
4263 * @objp: Pointer to the object
4264 *
4265 * kmalloc may internally round up allocations and return more memory
4266 * than requested. ksize() can be used to determine the actual amount of
4267 * memory allocated. The caller may use this additional memory, even though
4268 * a smaller amount of memory was initially specified with the kmalloc call.
4269 * The caller must guarantee that objp points to a valid object previously
4270 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4271 * must not be freed during the duration of the call.
4272 */
Pekka Enbergfd76bab2007-05-06 14:48:40 -07004273size_t ksize(const void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274{
Christoph Lameteref8b4522007-10-16 01:24:46 -07004275 BUG_ON(!objp);
4276 if (unlikely(objp == ZERO_SIZE_PTR))
Manfred Spraul00e145b2005-09-03 15:55:07 -07004277 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278
Christoph Lameter8c138bc2012-06-13 10:24:58 -05004279 return virt_to_cache(objp)->object_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280}
Kirill A. Shutemovb1aabec2009-02-10 15:21:44 +02004281EXPORT_SYMBOL(ksize);
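
/*
 * Illustrative usage sketch (error handling omitted): kmalloc rounds the
 * request up to the next kmalloc cache size, and ksize() reports that
 * rounded size, which here would typically be 128:
 *
 *	char *buf = kmalloc(100, GFP_KERNEL);
 *	size_t sz = ksize(buf);
 */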