// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations.  We can override this behavior with
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}
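
/*
 * Worked examples of the conversion above (an illustrative sketch
 * derived from the code, not additional API):
 *
 *	kmem_flags_convert(0)
 *		== GFP_KERNEL | __GFP_NOWARN
 *	kmem_flags_convert(KM_NOFS | KM_ZERO)
 *		== ((GFP_KERNEL | __GFP_NOWARN) & ~__GFP_FS) | __GFP_ZERO
 *	kmem_flags_convert(KM_MAYFAIL)
 *		== GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL
 */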

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}
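
/*
 * Illustrative sketch of a typical allocate/use/free pairing; the
 * "buf" and "size" names here are hypothetical.  kmem_free() goes
 * through kvfree(), so it handles memory from kmem_alloc() as well as
 * from kmem_alloc_large(), which may fall back to vmalloc for
 * allocations the slab allocator cannot satisfy.
 *
 *	void	*buf = kmem_zalloc_large(size, KM_MAYFAIL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kmem_free(buf);
 */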

/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}
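
/*
 * Illustrative sketch of the usual zone lifecycle built from the
 * wrappers above; "struct xfs_foo" and the zone name are hypothetical.
 * A zone is simply a struct kmem_cache underneath.
 *
 *	kmem_zone_t	*foo_zone;
 *
 *	foo_zone = kmem_zone_init(sizeof(struct xfs_foo), "xfs_foo");
 *	if (!foo_zone)
 *		return -ENOMEM;
 *
 *	foo = kmem_zone_zalloc(foo_zone, KM_NOFS);
 *	...
 *	kmem_zone_free(foo_zone, foo);
 *	kmem_zone_destroy(foo_zone);
 */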

static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
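
/*
 * Illustrative note: kmem_to_page() lets a caller look up the backing
 * page of a buffer without knowing whether the allocation came from
 * the slab allocator or from vmalloc (e.g. a kmem_alloc_large()
 * buffer).  Sketch with a hypothetical buffer:
 *
 *	struct page	*page = kmem_to_page(buf);
 */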

#endif /* __XFS_SUPPORT_KMEM_H__ */