// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include "xfs_message.h"
#include "xfs_trace.h"

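/*
 * Allocate memory via kmalloc(), retrying indefinitely until it succeeds
 * unless the caller passed KM_MAYFAIL. Every 100 failed attempts a
 * possible-deadlock warning is logged before backing off again.
 */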
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
	int		retries = 0;
	gfp_t		lflags = kmem_flags_convert(flags);
	void		*ptr;

	trace_kmem_alloc(size, flags, _RET_IP_);

	do {
		ptr = kmalloc(size, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
				current->comm, current->pid,
				(unsigned int)size, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}

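/*
 * Allocate a large buffer: try the kmalloc()-based kmem_alloc() first,
 * allowing it to fail, and fall back to __vmalloc() when physically
 * contiguous memory cannot be obtained.
 */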
void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
	unsigned	nofs_flag = 0;
	void		*ptr;
	gfp_t		lflags;

	trace_kmem_alloc_large(size, flags, _RET_IP_);

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr)
		return ptr;

	/*
	 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
	 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context
	 * here. Hence we need to tell memory reclaim that we are in such a
	 * context via PF_MEMALLOC_NOFS to prevent memory reclaim re-entering
	 * the filesystem here and potentially deadlocking.
	 */
	if (flags & KM_NOFS)
		nofs_flag = memalloc_nofs_save();

	lflags = kmem_flags_convert(flags);
	ptr = __vmalloc(size, lflags, PAGE_KERNEL);

	if (flags & KM_NOFS)
		memalloc_nofs_restore(nofs_flag);

	return ptr;
}

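/*
 * Resize a buffer via krealloc(), using the same retry-until-success
 * policy as kmem_alloc() unless KM_MAYFAIL is set.
 */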
void *
kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
{
	int		retries = 0;
	gfp_t		lflags = kmem_flags_convert(flags);
	void		*ptr;

	trace_kmem_realloc(newsize, flags, _RET_IP_);

	do {
		ptr = krealloc(old, newsize, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %zu in %s (mode:0x%x)",
				current->comm, current->pid,
				newsize, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}

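/*
 * Allocate an object from a slab zone (kmem_cache), again retrying
 * indefinitely with a periodic deadlock warning unless KM_MAYFAIL is set.
 */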
void *
kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	int		retries = 0;
	gfp_t		lflags = kmem_flags_convert(flags);
	void		*ptr;

	trace_kmem_zone_alloc(kmem_cache_size(zone), flags, _RET_IP_);
	do {
		ptr = kmem_cache_alloc(zone, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
				current->comm, current->pid,
				__func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}