#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

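/*
 * raw_copy_from_user() is the arch backend behind copy_from_user(); the
 * caller is expected to have done access_ok() already.  Small constant
 * sizes are open-coded with __get_user_asm_nozero() (no zeroing of the
 * destination on fault), everything else goes through copy_user_generic().
 * Returns the number of bytes that could not be copied.
 */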
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

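/*
 * raw_copy_to_user() is the mirror image for copy_to_user(): the same
 * constant-size fast paths using __put_user_asm(), the same fallback to
 * copy_user_generic(), and again no access_ok() check here.
 */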
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

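/*
 * raw_copy_in_user() copies between two userspace buffers.  Both pointers
 * are user pointers, so there are no constant-size fast paths and the
 * work is handed straight to copy_user_generic().
 */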
static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}

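/*
 * Cache-bypassing variants, implemented out of line in arch code:
 * __copy_user_nocache() is intended to use non-temporal stores so bulk
 * copies do not pollute the CPU caches, and __copy_user_flushcache() in
 * addition ensures the destination lines are written back after the copy
 * (of interest for persistent-memory targets).
 */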
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

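/*
 * Cache-bypassing copy for callers in atomic context.  The explicit
 * kasan_check_write() covers the destination because the copy itself is
 * done by uninstrumented asm; zerorest=0 presumably means the destination
 * tail is left untouched on a fault.
 */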
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

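/*
 * Same pattern, but using the flushcache variant so the copied data is
 * pushed out of the CPU caches (e.g. for writes headed to persistent
 * memory).
 */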
static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}

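/*
 * Fixup helper used when one of the copy routines faults part-way
 * through: it retries the remainder a byte at a time and returns how
 * many bytes could not be copied.
 */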
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */