/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following three variables can be packed, because
	 * a vmap_area object is always in one of three states:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) in "busy" tree (root is vmap_area_root)
	 *    3) in purge list  (head is vmap_purge_list)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
		struct llist_node purge_list;	/* in purge list */
	};
};

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
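/*
 * Example (illustrative sketch only): vm_map_ram()/vm_unmap_ram() give a
 * transient kernel mapping of a caller-supplied page array; "pages", "nr"
 * and "src" are hypothetical driver-side names. The count passed to
 * vm_unmap_ram() must match the one passed to vm_map_ram():
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);
 */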

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
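/*
 * Example (illustrative sketch only): allocating a large, virtually
 * contiguous, zeroed table and freeing it again; "nr_entries" and
 * "struct foo" are hypothetical. array_size() from <linux/overflow.h>
 * (included above) saturates instead of wrapping on overflow:
 *
 *	struct foo *tbl = vzalloc(array_size(nr_entries, sizeof(*tbl)));
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */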

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
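/*
 * Example (illustrative sketch only): making an already-allocated page
 * array virtually contiguous; "pages" and "nr" are hypothetical
 * driver-side names. Note that vunmap() releases only the mapping, not
 * the underlying pages:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */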

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
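/*
 * Example (illustrative sketch only): exposing a vmalloc'ed buffer to
 * user space from a driver's ->mmap() handler. The buffer must have been
 * allocated with vmalloc_user() (or vmalloc_32_user()) so the underlying
 * area carries VM_USERMAP; "foo_mmap" and "buf" are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, 0);
 *	}
 */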

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif
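/*
 * For instance (illustrative only, not a requirement of this header), an
 * architecture that needs to be told about PMD-level changes could
 * define, in its <asm/vmalloc.h>:
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
 */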

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
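/*
 * For example, a four-page VM_ALLOC area is normally created with
 * area->size of five pages (four usable pages plus one trailing guard
 * page), so get_vm_area_size() reports the four usable pages. With
 * VM_NO_GUARD no guard page was reserved and area->size is already the
 * usable size.
 */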

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
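/*
 * Example (illustrative sketch only): reserving a range of kernel
 * virtual address space without backing pages, then releasing it; the
 * caller is expected to install mappings at area->addr itself:
 *
 *	struct vm_struct *area = get_vm_area(16 * PAGE_SIZE, VM_IOREMAP);
 *	if (!area)
 *		return -ENOMEM;
 *	...
 *	free_vm_area(area);
 */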

#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
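/*
 * Example (illustrative sketch only): callers that make a vmalloc'ed
 * region executable typically mark it first, so that direct map
 * permissions are reset and TLBs flushed when it is eventually freed;
 * "trampoline", "npages" and the set_memory_*() calls are hypothetical
 * usage:
 *
 *	set_vm_flush_reset_perms(trampoline);
 *	set_memory_ro((unsigned long)trampoline, npages);
 *	set_memory_x((unsigned long)trampoline, npages);
 */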
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals. Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

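/*
 * Example (illustrative sketch only): a driver that caches vmap ranges
 * can register a purge notifier to drop those caches when vmap address
 * space is reclaimed under pressure; "foo_vmap_purge" and "foo_vmap_nb"
 * are hypothetical:
 *
 *	static int foo_vmap_purge(struct notifier_block *nb,
 *				  unsigned long event, void *ptr)
 *	{
 *		...
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_vmap_nb = {
 *		.notifier_call = foo_vmap_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&foo_vmap_nb);
 */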
#endif /* _LINUX_VMALLOC_H */