/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
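/*
 * Each variant returns the number of bytes left uncopied (0 on full
 * success); the implementations live in arch/x86/lib/copy_user_64.S.
 */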
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
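	/*
	 * alternative_call_2() patches the call destination once at boot,
	 * so selecting a variant costs nothing at run time.  The operands
	 * follow the C calling convention (%rdi = to, %rsi = from,
	 * %rdx = len, return value in %rax), letting all three variants
	 * share this one call site; the clobber list names the remaining
	 * call-clobbered registers.
	 */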
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

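/*
 * The __force casts below strip sparse's __user address-space
 * annotation; the raw_* helpers assume the caller has already
 * validated the range with access_ok().
 */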
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
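/*
 * Illustrative sketch only: callers normally reach these helpers via
 * copy_from_user()/copy_to_user(), which add the access_ok() check and
 * zero the uncopied tail on failure.  "buf" and "uptr" are hypothetical
 * names.
 *
 *	char buf[64];
 *
 *	if (copy_from_user(buf, uptr, sizeof(buf)))
 *		return -EFAULT;
 */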

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);
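/*
 * __copy_user_nocache() copies with non-temporal stores to avoid
 * displacing existing cache contents; @zerorest selects whether the
 * destination tail is zeroed when the copy faults part-way.
 */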

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);
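/*
 * The flushcache variants are for writes to persistent memory: the
 * destination cache lines are flushed as part of the copy, so the new
 * data is not left dirty in the CPU caches.
 */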

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
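	/*
	 * The copy itself is done in assembly, which KASAN cannot
	 * instrument, so the destination write is checked explicitly.
	 */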
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
#endif /* _ASM_X86_UACCESS_64_H */