/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

void kunmap_high(struct page *page);

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

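/*
 * Example (editorial sketch, not part of the kernel interface): a caller in
 * process context that may sleep would typically bracket its access to a
 * possibly-highmem page like this:
 *
 *	void *vaddr = kmap(page);
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * kmap() may sleep, so it must not be used in atomic context, and kunmap()
 * takes the struct page rather than the virtual address returned by kmap().
 */
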
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 *
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);
}
#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)

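/*
 * Example (editorial sketch, not part of the kernel interface): the atomic
 * variant may be used where sleeping is not allowed, e.g. with a spinlock
 * held or from interrupt context, but the mapping must be dropped before
 * anything that might sleep:
 *
 *	void *vaddr = kmap_atomic(page);
 *
 *	memcpy(vaddr + offset, buf, len);
 *	kunmap_atomic(vaddr);
 *
 * (offset, buf and len are placeholders for the caller's own state.)
 * Note the asymmetry with kmap()/kunmap(): kunmap_atomic() takes the
 * address returned by kmap_atomic(), not the struct page.
 */
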
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_dec(void)
{
	atomic_long_dec(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline void totalhigh_pages_set(long val)
{
	atomic_long_set(&_totalhigh_pages, val);
}

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page)
{
}

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void kunmap_atomic_high(void *addr)
{
	/*
	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
	 * handles re-enabling faults + preemption
	 */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while(0)

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap();
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	kunmap_atomic_high(addr);				\
	pagefault_enable();					\
	preempt_enable();					\
} while (0)

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
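/*
 * Example (editorial sketch): an anonymous page fault handler would
 * typically allocate the page backing a faulting user address with
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *
 * so that the new page is already zeroed and is allocated __GFP_MOVABLE,
 * allowing it to be migrated or reclaimed later.
 * (vmf stands for a struct vm_fault here and is only illustrative.)
 */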

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
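/*
 * Example (editorial sketch): a filesystem that copied fewer bytes into a
 * page than it intended could zero the rest of the affected range with
 *
 *	zero_user(page, offset + copied, len - copied);
 *
 * which maps the page atomically, clears the bytes and flushes the dcache.
 * (offset, copied and len are placeholders for the caller's bookkeeping.)
 */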

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#endif /* _LINUX_HIGHMEM_H */