/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

void kunmap_high(struct page *page);

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
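
/*
 * Illustrative sketch, not part of this header: a typical process-context
 * caller maps a possibly-highmem page with kmap(), which may sleep, and
 * releases the mapping with kunmap(), which takes the page rather than
 * the mapped address ("buf", "offset" and "len" are hypothetical):
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr + offset, buf, len);
 *	kunmap(page);
 *
 * On configurations without highmem both calls collapse to little more
 * than page_address().
 */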

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 *
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);
}
#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
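
/*
 * Illustrative sketch, assuming a caller that must not sleep ("src",
 * "tmp", "offset" and "len" are hypothetical): copying data out of a
 * possibly-highmem page under an atomic kmap:
 *
 *	char *vaddr = kmap_atomic(src);
 *	memcpy(tmp, vaddr + offset, len);
 *	kunmap_atomic(vaddr);
 *
 * kmap_atomic() disables preemption and pagefaults, so the window between
 * map and unmap must stay short and must not sleep.
 */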

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_dec(void)
{
	atomic_long_dec(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline void totalhigh_pages_set(long val)
{
	atomic_long_set(&_totalhigh_pages, val);
}

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page)
{
}

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void kunmap_atomic_high(void *addr)
{
	/*
	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
	 * handles re-enabling faults + preemption
	 */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while (0)

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}
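
/*
 * Rough sketch of how architecture kmap_atomic() implementations use this
 * per-CPU LIFO index (details vary by arch; the fixmap calculation below
 * is modelled on the 32-bit x86 code and is an assumption, not a contract):
 *
 *	int type = kmap_atomic_idx_push();
 *	int idx  = type + KM_TYPE_NR * smp_processor_id();
 *	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	...install the pte and use the mapping...
 *	kmap_atomic_idx_pop();
 *
 * Pushes and pops must nest strictly per CPU, which is another reason
 * sleeping is forbidden while an atomic kmap is held.
 */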

#endif

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap().
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	kunmap_atomic_high(addr);				\
	pagefault_enable();					\
	preempt_enable();					\
} while (0)
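
/*
 * Illustrative note: the BUILD_BUG_ON() above turns the classic mistake of
 * passing the page instead of the mapped address into a compile-time error:
 *
 *	void *vaddr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(page);	<-- fails to build: argument is a struct page *
 *	kunmap_atomic(vaddr);	<-- correct
 */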

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
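
/*
 * Illustrative sketch (the fault-handler context, "vma" and "vmf", is
 * assumed rather than defined here): anonymous-fault code can allocate a
 * zeroed, movable highmem page for the faulting address like this:
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *
 * The page comes back already cleared via clear_user_highpage().
 */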

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
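
/*
 * Illustrative sketch: filesystems commonly use zero_user() to clear the
 * tail of the last page beyond the end of file ("size" is a hypothetical
 * file size in bytes):
 *
 *	unsigned offset = size & (PAGE_SIZE - 1);
 *
 *	if (offset)
 *		zero_user(page, offset, PAGE_SIZE - offset);
 *
 * zero_user_segments() handles two disjoint ranges in a single
 * kmap_atomic()/kunmap_atomic() round trip.
 */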

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#endif /* _LINUX_HIGHMEM_H */