/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This file was copied from include/asm-generic/uaccess.h
 */

#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

#include <asm/pgtable.h>		/* for TASK_SIZE */

/*
 * User space memory access functions
 */

extern unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
extern unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(to, from, n);
}
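
/*
 * Usage sketch (illustrative, not part of this header): callers do not
 * use the raw_* routines directly.  The generic copy_{to,from}_user()
 * wrappers in <linux/uaccess.h> perform the access_ok() check and then
 * call the functions above, which hand off to the assembly routines.
 * Both return the number of bytes that could NOT be copied, so zero
 * means success:
 *
 *	struct foo_args args;	// "foo_args" is a hypothetical type
 *
 *	if (copy_from_user(&args, user_ptr, sizeof(args)))
 *		return -EFAULT;
 */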

#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/extable.h>
#include <asm/asm.h>

#define __enable_user_access()						\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()						\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
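
/*
 * The SUM bit ("permit Supervisor User Memory access") in the sstatus
 * CSR gates kernel access to user pages: csrs sets it, csrc clears it.
 * Every user access below is bracketed by this pair, i.e. the pattern
 * (illustrative only) is:
 *
 *	__enable_user_access();
 *	... loads/stores through the user pointer ...
 *	__disable_user_access();
 *
 * Keeping the window as small as possible means a stray kernel pointer
 * dereference cannot silently read or write user memory.
 */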

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, it is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define segment_eq(a, b) ((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)


/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size) ({					\
	__chk_user_ptr(addr);						\
	likely(__access_ok((unsigned long __force)(addr), (size)));	\
})

/*
 * Ensure that the range [addr, addr+size) is within the process's
 * address space
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	const mm_segment_t fs = get_fs();

	return size <= fs.seg && addr <= fs.seg - size;
}
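
/*
 * Worked example: the check is written as
 * "size <= seg && addr <= seg - size" instead of the naive
 * "addr + size <= seg" because the latter can wrap.  With
 * addr == ~0UL and size == 2, "addr + size" overflows to 1 and the
 * naive check wrongly passes; the form above rejects it because
 * addr > seg - size.
 */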

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

#define __LSW	0
#define __MSW	1

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 */

#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(x) __x;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %3\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	li %1, 0\n"				\
		"	jump 2b, %2\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
		: "m" (*(ptr)), "i" (-EFAULT));			\
	__disable_user_access();				\
	(x) = __x;						\
} while (0)
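
/*
 * How the fixup works: if the load at label 1 faults, the trap handler
 * searches __ex_table, finds the "1b, 3b" pair, and resumes execution
 * at label 3 in the .fixup section, which sets err to -EFAULT, zeroes
 * the destination register, and jumps back to label 2 to rejoin the
 * normal path.  When no fault occurs, the out-of-line fixup code is
 * never reached and costs nothing.
 */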

#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	uintptr_t __tmp;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %4\n"				\
		"2:\n"						\
		"	lw %2, %5\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	li %1, 0\n"				\
		"	li %2, 0\n"				\
		"	jump 3b, %3\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
			"=r" (__tmp)				\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
			"i" (-EFAULT));				\
	__disable_user_access();				\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_64BIT */


/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)					\
({								\
	register long __gu_err = 0;				\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	__chk_user_ptr(__gu_ptr);				\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, __gu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__gu_err;						\
})

/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__get_user((x), __p) :				\
		((x) = 0, -EFAULT);				\
})
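
/*
 * Example (hypothetical syscall body; "uaddr" is a user pointer and
 * "do_something" a made-up helper):
 *
 *	int val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 *	return do_something(val);
 *
 * get_user() both range-checks the pointer and zeroes "val" on
 * failure, so "val" is never left holding uninitialized data.
 */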

#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(*(ptr)) __x = x;				\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %z3, %2\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	jump 2b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
		: "rJ" (__x), "i" (-EFAULT));			\
	__disable_user_access();				\
} while (0)

#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	uintptr_t __tmp;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	sw %z4, %2\n"				\
		"2:\n"						\
		"	sw %z5, %3\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	jump 3b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp),			\
			"=m" (__ptr[__LSW]),			\
			"=m" (__ptr[__MSW])			\
		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
	__disable_user_access();				\
} while (0)
#endif /* CONFIG_64BIT */


/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)					\
({								\
	register long __pu_err = 0;				\
	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	__chk_user_ptr(__gu_ptr);				\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 8:							\
		__put_user_8((x), __gu_ptr, __pu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__pu_err;						\
})

/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
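
/*
 * Example (hypothetical ioctl handler; "status" and "uaddr" are
 * made-up names):
 *
 *	if (put_user(status, uaddr))
 *		return -EFAULT;
 *
 * Unlike __put_user(), put_user() performs the access_ok() check
 * itself, so it is the safe default for pointers that have not been
 * validated earlier.
 */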

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strlen_user(const char __user *str);
extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(to, n) ?
		__clear_user(to, n) : n;
}
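
/*
 * Example (illustrative): zero a user buffer, mapping any fault to
 * -EFAULT.  clear_user() returns the number of bytes that could not
 * be cleared, so a nonzero return indicates failure:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */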

/*
 * Atomic compare-and-exchange, but with a fixup for userspace faults.  Faults
 * will set "err" to -EFAULT, while successful accesses return the previous
 * value.
 */
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(err) __err = 0;				\
	register unsigned int __rc;				\
	__enable_user_access();					\
	switch (size) {						\
	case 4:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.w" #scb " %[ret], %[ptr]\n"		\
		"	bne %[ret], %z[old], 1f\n"		\
		"	sc.w" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez %[rc], 0b\n"			\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc] "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	case 8:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.d" #scb " %[ret], %[ptr]\n"		\
		"	bne %[ret], %z[old], 1f\n"		\
		"	sc.d" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez %[rc], 0b\n"			\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc] "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__disable_user_access();				\
	(err) = __err;						\
	__ret;							\
})
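
/*
 * Usage sketch (hypothetical, futex-style; the caller must have
 * validated "uaddr" with access_ok() first):
 *
 *	u32 __user *uaddr;	// points into user memory
 *	u32 oldval, newval, prev;
 *	int err;
 *
 *	prev = __cmpxchg_user(uaddr, oldval, newval, err, 4, .aqrl, .aqrl);
 *	if (err)
 *		return err;		// faulted: -EFAULT
 *	return prev == oldval ? 0 : -EAGAIN;
 *
 * The trailing two arguments are the acquire/release suffixes pasted
 * onto the lr/sc instructions.
 */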

#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */