/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
        long __d0;

        might_fault();
        /*
         * No memory constraint because the asm doesn't change any memory
         * gcc knows about.
         */
        stac();
        asm volatile(
                /* zero 8 bytes at a time; %%rcx holds size / 8 */
                "       testq  %[size8],%[size8]\n"
                "       jz     4f\n"
                "0:     movq $0,(%[dst])\n"
                "       addq   $8,%[dst]\n"
                "       decl %%ecx ; jnz   0b\n"
                /* then zero the remaining size & 7 bytes one at a time */
                "4:     movq  %[size1],%%rcx\n"
                "       testl %%ecx,%%ecx\n"
                "       jz     2f\n"
                "1:     movb   $0,(%[dst])\n"
                "       incq   %[dst]\n"
                "       decl %%ecx ; jnz  1b\n"
                "2:\n"
                /* fixup: report bytes left as remaining qwords * 8 + remainder */
                ".section .fixup,\"ax\"\n"
                "3:     lea 0(%[size1],%[size8],8),%[size8]\n"
                "       jmp 2b\n"
                ".previous\n"
                _ASM_EXTABLE_UA(0b, 3b)
                _ASM_EXTABLE_UA(1b, 2b)
                : [size8] "=&c"(size), [dst] "=&D" (__d0)
                : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
        clac();
        return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n))
                return __clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(clear_user);
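
/*
 * Illustrative sketch, not part of this file: a typical caller zeroes a
 * user buffer with clear_user() and treats a non-zero return (the number
 * of bytes left unzeroed) as -EFAULT. The helper and its arguments are
 * hypothetical.
 */
#if 0
static long example_zero_user_buffer(void __user *ubuf, unsigned long len)
{
        if (clear_user(ubuf, len))
                return -EFAULT;         /* some bytes could not be zeroed */
        return 0;
}
#endif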

/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
        for (; len; --len, to++) {
                char c;

                if (__get_user_nocheck(c, from++, sizeof(char)))
                        break;
                if (__put_user_nocheck(c, to, sizeof(char)))
                        break;
        }
        clac();
        return len;
}
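
/*
 * Worked example of the contract above (hypothetical numbers): if a
 * 100-byte copy faults after 60 bytes, the fixup path retries with
 * copy_user_handle_tail(to + 60, from + 60, 40). If 10 more bytes go
 * through before the fault hits again, the function returns 30, i.e.
 * the number of bytes that were not copied.
 */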

/*
 * Similar to copy_user_handle_tail, probe for the write fault point,
 * but reuse __memcpy_mcsafe in case a new read error is encountered.
 * clac() is handled in _copy_to_iter_mcsafe().
 */
__visible unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len)
{
        for (; len; --len, to++, from++) {
                /*
                 * Call the assembly routine back directly since
                 * memcpy_mcsafe() may silently fall back to memcpy.
                 */
                unsigned long rem = __memcpy_mcsafe(to, from, 1);

                if (rem)
                        break;
        }
        return len;
}
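
/*
 * Illustrative sketch (hypothetical, not built): turning the "bytes not
 * copied" return into a "bytes copied" count, as a caller like
 * _copy_to_iter_mcsafe() must do after a recoverable read error.
 */
#if 0
static size_t example_mcsafe_copy(char *to, char *from, unsigned len)
{
        unsigned long rem = mcsafe_handle_tail(to, from, len);

        return len - rem;       /* bytes that made it before the error */
}
#endif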

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
        unsigned long clflush_mask = x86_clflush_size - 1;
        void *vend = addr + size;
        void *p;

        for (p = (void *)((unsigned long)addr & ~clflush_mask);
             p < vend; p += x86_clflush_size)
                clwb(p);
}
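
/*
 * Worked example (assuming 64-byte cache lines): clean_cache_range(p, 1)
 * with p == (void *)0x1007 masks the start down to 0x1000 and issues a
 * single CLWB covering 0x1000-0x103f, so writing back "1 byte" cleans
 * the whole line containing it.
 */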

void arch_wb_cache_pmem(void *addr, size_t size)
{
        clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        unsigned long flushed, dest = (unsigned long) dst;
        long rc = __copy_user_nocache(dst, src, size, 0);

        /*
         * __copy_user_nocache() uses non-temporal stores for the bulk
         * of the transfer, but we need to manually flush if the
         * transfer is unaligned. A cached memory copy is used when
         * destination or size is not naturally aligned. That is:
         *   - Require 8-byte alignment when size is 8 bytes or larger.
         *   - Require 4-byte alignment when size is 4 bytes.
         */
        if (size < 8) {
                if (!IS_ALIGNED(dest, 4) || size != 4)
                        clean_cache_range(dst, 1);
        } else {
                if (!IS_ALIGNED(dest, 8)) {
                        dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
                        clean_cache_range(dst, 1);
                }

                flushed = dest - (unsigned long) dst;
                if (size > flushed && !IS_ALIGNED(size - flushed, 8))
                        clean_cache_range(dst + size - 1, 1);
        }

        return rc;
}
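
/*
 * Worked example for the flush logic above (hypothetical addresses,
 * 64-byte cache lines): copying 21 bytes to dst == 0x103c. The start is
 * not 8-byte aligned, so the line at 0x1000 holding the cached head
 * stores is written back; the 17 bytes beyond the first cache line
 * boundary are not a multiple of 8 either, so the line at 0x1040
 * holding the cached tail store is written back as well.
 */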

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
        unsigned long dest = (unsigned long) _dst;
        unsigned long source = (unsigned long) _src;

        /* cache copy and flush to align dest */
        if (!IS_ALIGNED(dest, 8)) {
                unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

                memcpy((void *) dest, (void *) source, len);
                clean_cache_range((void *) dest, len);
                dest += len;
                source += len;
                size -= len;
                if (!size)
                        return;
        }

        /* 4x8 movnti loop */
        while (size >= 32) {
                asm("movq    (%0), %%r8\n"
                    "movq   8(%0), %%r9\n"
                    "movq  16(%0), %%r10\n"
                    "movq  24(%0), %%r11\n"
                    "movnti  %%r8,   (%1)\n"
                    "movnti  %%r9,  8(%1)\n"
                    "movnti %%r10, 16(%1)\n"
                    "movnti %%r11, 24(%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8", "r9", "r10", "r11");
                dest += 32;
                source += 32;
                size -= 32;
        }

        /* 1x8 movnti loop */
        while (size >= 8) {
                asm("movq    (%0), %%r8\n"
                    "movnti  %%r8,   (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 8;
                source += 8;
                size -= 8;
        }

        /* 1x4 movnti loop */
        while (size >= 4) {
                asm("movl    (%0), %%r8d\n"
                    "movnti %%r8d,   (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 4;
                source += 4;
                size -= 4;
        }

        /* cache copy for remaining bytes */
        if (size) {
                memcpy((void *) dest, (void *) source, size);
                clean_cache_range((void *) dest, size);
        }
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
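
/*
 * Illustrative sketch, not part of this file: a pmem-style driver might
 * use memcpy_flushcache() (the generic wrapper that resolves to
 * __memcpy_flushcache() here) to make a record durable before publishing
 * it. The names example_commit_record, pmem_dst and rec are hypothetical.
 */
#if 0
static void example_commit_record(void *pmem_dst, const void *rec, size_t len)
{
        memcpy_flushcache(pmem_dst, rec, len);  /* data reaches the pmem domain */
        wmb();                                  /* order it before the commit store */
}
#endif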

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                            size_t len)
{
        char *from = kmap_atomic(page);

        memcpy_flushcache(to, from + offset, len);
        kunmap_atomic(from);
}
#endif