/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations. We can override this by using
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	if (flags & KM_NOLOCKDEP)
		lflags |= __GFP_NOLOCKDEP;

	return lflags;
}
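
/*
 * For illustration only (not part of the original header): a caller in
 * transaction context that must not recurse into the filesystem and can
 * tolerate allocation failure would pass KM_NOFS | KM_MAYFAIL, which
 * kmem_flags_convert() turns into
 *
 *	GFP_NOFS | __GFP_NOWARN | __GFP_RETRY_MAYFAIL
 *
 * (GFP_KERNEL with __GFP_FS cleared is equivalent to GFP_NOFS).
 */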

extern void *kmem_alloc(size_t size, xfs_km_flags_t flags);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t flags);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}
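
/*
 * Usage sketch, for illustration only ("struct xfs_foo" is a made-up
 * type, not an XFS structure): allocate a zeroed buffer that is allowed
 * to fail under memory pressure, then release it with kmem_free(), which
 * copes with both kmalloc'd and vmalloc'd memory via kvfree():
 *
 *	struct xfs_foo	*fp;
 *
 *	fp = kmem_zalloc(sizeof(*fp), KM_MAYFAIL);
 *	if (!fp)
 *		return -ENOMEM;
 *	...
 *	kmem_free(fp);
 */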
| 73 | |
/*
 * Zone interfaces
 */

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache
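
/*
 * The zone names are thin aliases for the generic slab cache API; a zone
 * is just a kmem_cache. Illustrative only ("xfs_foo" is a made-up cache
 * name, not one XFS actually creates):
 *
 *	kmem_zone_t	*xfs_foo_zone;
 *
 *	xfs_foo_zone = kmem_cache_create("xfs_foo",
 *			sizeof(struct xfs_foo), 0, 0, NULL);
 *	fp = kmem_cache_zalloc(xfs_foo_zone, GFP_KERNEL);
 *	...
 *	kmem_cache_free(xfs_foo_zone, fp);
 *	kmem_cache_destroy(xfs_foo_zone);
 */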
| 80 | |
static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
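
/*
 * For illustration only: kmem_to_page() lets callers treat kmalloc'd and
 * vmalloc'd buffers uniformly, e.g. when adding an allocation to a bio:
 *
 *	struct page	*page = kmem_to_page(addr);
 *	unsigned int	off = offset_in_page(addr);
 *
 *	bio_add_page(bio, page, len, off);
 */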

#endif /* __XFS_SUPPORT_KMEM_H__ */