// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
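	/*
	 * Label 0 zeroes eight bytes per iteration with %rcx = size/8;
	 * label 1 clears the remaining size & 7 bytes one at a time. On a
	 * fault, the fixup at label 3 recomputes the number of bytes left
	 * (size1 + size8*8) so the caller sees how much was not cleared.
	 */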
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"	.align 16\n"
		"0:	movq $0,(%[dst])\n"
		"	addq   $8,%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb $0,(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE_UA(0b, 3b)
		_ASM_EXTABLE_UA(1b, 2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

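/*
 * clear_user - zero a block of memory in user space
 * @to: destination address, in user space
 * @n: number of bytes to zero
 *
 * Validates the range with access_ok() before clearing it. Returns the
 * number of bytes that could not be cleared (0 on success).
 */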
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

/*
 * Similar to copy_user_handle_tail, probe for the write fault point,
 * but reuse __memcpy_mcsafe in case a new read error is encountered.
 * clac() is handled in _copy_to_iter_mcsafe().
 */
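/*
 * Returns the number of bytes that were not copied; 0 means the whole
 * tail was written out successfully.
 */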
__visible notrace unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++, from++) {
		/*
		 * Call the assembly routine back directly since
		 * memcpy_mcsafe() may silently fallback to memcpy.
		 */
		unsigned long rem = __memcpy_mcsafe(to, from, 1);

		if (rem)
			break;
	}
	return len;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

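	/* Round @addr down to a cache-line boundary and clwb one line at a time. */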
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

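/*
 * Write back a range headed for persistent memory; clwb() leaves the
 * lines valid in the cache, unlike clflush.
 */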
void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

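		/*
		 * @flushed is the number of head bytes whose cache line has
		 * already been written back above. If the remaining length
		 * is not a multiple of 8, __copy_user_nocache() wrote the
		 * trailing bytes with cached stores, so the cache line
		 * containing the last byte must be written back too.
		 */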
		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

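	/*
	 * The aligned bulk of the copy uses non-temporal movnti stores,
	 * which bypass the cache entirely, so no clwb is needed for the
	 * ranges written below.
	 */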
	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);

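/*
 * Copy @len bytes from @page + @offset into @to and write the
 * destination range back out of the cache via memcpy_flushcache().
 */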
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif