// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations. We can override this behavior with
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as doing so is feasible, but to fail rather than retry forever,
	 * for all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}
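
/*
 * Example (illustrative sketch): how the KM_* flags map to gfp_t. A
 * KM_NOFS | KM_MAYFAIL allocation converts as:
 *
 *	gfp_t gfp = kmem_flags_convert(KM_NOFS | KM_MAYFAIL);
 *
 *	gfp == (GFP_KERNEL & ~__GFP_FS) | __GFP_NOWARN | __GFP_RETRY_MAYFAIL
 *
 * i.e. the allocation will not recurse into filesystem reclaim and will
 * fail rather than retry forever.
 */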

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}


static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}
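
/*
 * Example (illustrative sketch; struct xfs_foo is a hypothetical
 * structure): allocate and free a zeroed buffer from transaction
 * context, where recursing into filesystem reclaim must be avoided:
 *
 *	struct xfs_foo	*foo;
 *
 *	foo = kmem_zalloc(sizeof(*foo), KM_NOFS | KM_MAYFAIL);
 *	if (!foo)
 *		return -ENOMEM;
 *	...
 *	kmem_free(foo);
 *
 * kmem_free() is safe for memory from kmem_alloc(), kmem_zalloc() and
 * kmem_alloc_large() alike, because kvfree() handles both kmalloc and
 * vmalloc addresses.
 */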

/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}
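
/*
 * Example (illustrative sketch; foo_zone and struct xfs_foo are
 * hypothetical): the typical lifecycle of a zone backing fixed-size
 * objects:
 *
 *	kmem_zone_t	*foo_zone;
 *	struct xfs_foo	*foo;
 *
 *	foo_zone = kmem_zone_init(sizeof(struct xfs_foo), "xfs_foo");
 *	if (!foo_zone)
 *		return -ENOMEM;
 *
 *	foo = kmem_zone_zalloc(foo_zone, KM_NOFS);
 *	...
 *	kmem_zone_free(foo_zone, foo);
 *	kmem_zone_destroy(foo_zone);
 */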

static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
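
/*
 * Example (illustrative sketch): kmem_to_page() lets callers treat
 * kmalloc- and vmalloc-backed buffers uniformly, e.g. when mapping a
 * buffer that may have come from kmem_alloc_large() into pages:
 *
 *	struct page	*page = kmem_to_page(buf);
 *	unsigned int	off = offset_in_page(buf);
 *
 * For a vmalloc address the backing page is found via vmalloc_to_page();
 * otherwise virt_to_page() is used directly.
 */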

#endif /* __XFS_SUPPORT_KMEM_H__ */