// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include "xfs_message.h"
#include "xfs_trace.h"

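/*
 * Allocate @size bytes with kmalloc(), converting the KM_* flags to GFP flags
 * via kmem_flags_convert(). Unless KM_MAYFAIL is set, keep retrying until the
 * allocation succeeds, warning every 100 attempts that we may be deadlocked
 * on memory. Illustrative call from a hypothetical caller (not part of this
 * file):
 *
 *	ptr = kmem_alloc(sizeof(struct foo), KM_NOFS | KM_MAYFAIL);
 *	if (!ptr)
 *		return -ENOMEM;
 */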
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
	int	retries = 0;
	gfp_t	lflags = kmem_flags_convert(flags);
	void	*ptr;

	trace_kmem_alloc(size, flags, _RET_IP_);

	do {
		ptr = kmalloc(size, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
				current->comm, current->pid,
				(unsigned int)size, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}

/*
 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
 * we need to tell memory reclaim that we are in such a context via
 * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
 * and potentially deadlocking.
 */
static void *
__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
{
	unsigned nofs_flag = 0;
	void	*ptr;
	gfp_t	lflags = kmem_flags_convert(flags);

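	/*
	 * Scope PF_MEMALLOC_NOFS around the allocation so that any GFP_KERNEL
	 * allocations __vmalloc() makes internally cannot recurse back into
	 * the filesystem (see the comment above).
	 */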
	if (flags & KM_NOFS)
		nofs_flag = memalloc_nofs_save();

	ptr = __vmalloc(size, lflags);

	if (flags & KM_NOFS)
		memalloc_nofs_restore(nofs_flag);

	return ptr;
}

/*
 * Same as kmem_alloc_large(), except we guarantee the buffer returned is
 * aligned to the @align_mask. We only guarantee alignment up to page size;
 * we'll clamp the alignment at page size if it is larger. vmalloc always
 * returns a PAGE_SIZE aligned region.
 */
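/*
 * Illustrative use (hypothetical caller, not part of this file): allocating a
 * buffer for I/O to a device that requires 512 byte memory alignment, where
 * the align_mask is the required alignment minus one:
 *
 *	buf = kmem_alloc_io(size, 511, KM_MAYFAIL);
 *	if (!buf)
 *		return -ENOMEM;
 */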
void *
kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_io(size, flags, _RET_IP_);

	if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
		align_mask = PAGE_SIZE - 1;

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr) {
		if (!((uintptr_t)ptr & align_mask))
			return ptr;
		kfree(ptr);
	}
	return __kmem_vmalloc(size, flags);
}

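/*
 * Allocate @size bytes, trying a physically contiguous kmalloc()-based
 * allocation first and falling back to vmalloc()-backed memory via
 * __kmem_vmalloc() if that fails.
 */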
void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_large(size, flags, _RET_IP_);

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr)
		return ptr;
	return __kmem_vmalloc(size, flags);
}