/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

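/*
 * Flag semantics (summarizing kmem_flags_convert() below): KM_NOFS
 * clears __GFP_FS so reclaim cannot recurse back into the filesystem,
 * KM_MAYFAIL sets __GFP_RETRY_MAYFAIL so the allocation is allowed to
 * fail instead of retrying forever, KM_ZERO sets __GFP_ZERO to return
 * zeroed memory, and KM_NOLOCKDEP sets __GFP_NOLOCKDEP to suppress
 * lockdep fs-reclaim checks for that allocation.
 */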
typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations.  We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	if (flags & KM_NOLOCKDEP)
		lflags |= __GFP_NOLOCKDEP;

	return lflags;
}
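
/*
 * Illustrative sketch (not from the original header): a typical
 * transaction-context conversion such as
 *
 *	kmem_flags_convert(KM_NOFS | KM_ZERO)
 *
 * yields GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO with __GFP_FS cleared,
 * so the allocation will not recurse into filesystem reclaim and the
 * memory comes back zeroed.
 */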

extern void *kmem_alloc(size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}
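
/*
 * Typical call pattern (illustrative sketch only; "buf" is a
 * hypothetical local, not part of this header):
 *
 *	char	*buf;
 *
 *	buf = kmem_zalloc(PAGE_SIZE, KM_NOFS | KM_MAYFAIL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kmem_free(buf);
 *
 * kmem_free() pairs with both kmem_alloc() and kmem_zalloc(); it is
 * backed by kvfree(), which accepts slab-backed and vmalloc'd pointers
 * alike, so callers need not track which allocator provided the memory.
 */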

/*
 * Address to page conversion
 */
static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
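
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * when a buffer may live in either the slab or the vmalloc area,
 * kmem_to_page() resolves its backing page for I/O without the caller
 * knowing which allocator was used:
 *
 *	struct page	*page = kmem_to_page(buf);
 *	unsigned int	off = offset_in_page(buf);
 *
 *	bio_add_page(bio, page, len, off);
 */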

#endif /* __XFS_SUPPORT_KMEM_H__ */