/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_user_ll
		(void *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
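
/*
 * raw_copy_to_user() is the 32-bit arch backend behind the generic
 * copy_to_user() wrapper, which performs the access_ok() check before
 * calling it.  Like every raw_copy_*() helper it returns the number of
 * bytes that could not be copied, so 0 means complete success.
 */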
static __always_inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user_ll((__force void *)to, from, n);
}
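
/*
 * For a compile-time-constant size of 1, 2 or 4 bytes, the copy is
 * inlined as a single __get_user_asm_nozero() move rather than a call
 * into __copy_user_ll().  __uaccess_begin_nospec() opens the user
 * access window (STAC on SMAP hardware) and acts as a barrier against
 * Spectre v1 speculation past the caller's access_ok() check;
 * __uaccess_end() closes the window again (CLAC).  The "_nozero"
 * variants leave the destination untouched on a fault; zeroing the
 * uncopied tail is the generic wrapper's job (see the sketch below
 * raw_copy_from_user()).
 */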
static __always_inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			ret = 0;
			__uaccess_begin_nospec();
			__get_user_asm_nozero(*(u8 *)to, from, ret,
					      "b", "b", "=q", 1);
			__uaccess_end();
			return ret;
		case 2:
			ret = 0;
			__uaccess_begin_nospec();
			__get_user_asm_nozero(*(u16 *)to, from, ret,
					      "w", "w", "=r", 2);
			__uaccess_end();
			return ret;
		case 4:
			ret = 0;
			__uaccess_begin_nospec();
			__get_user_asm_nozero(*(u32 *)to, from, ret,
					      "l", "k", "=r", 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_user_ll(to, (__force const void *)from, n);
}
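
/*
 * For reference, a simplified sketch (not part of this header) of how
 * the generic wrapper in lib/usercopy.c drives raw_copy_from_user(),
 * with might_fault() and KASAN annotations trimmed:
 *
 *	unsigned long _copy_from_user(void *to, const void __user *from,
 *				      unsigned long n)
 *	{
 *		unsigned long res = n;
 *
 *		if (likely(access_ok(VERIFY_READ, from, n)))
 *			res = raw_copy_from_user(to, from, n);
 *		if (unlikely(res))
 *			memset(to + (n - res), 0, res);
 *		return res;
 *	}
 *
 * That final memset() zeroes the uncopied tail, which is why the raw
 * "_nozero" helpers above can safely skip zeroing themselves.
 */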
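
/*
 * Cache-avoiding copy: __copy_from_user_ll_nocache_nozero() can use
 * non-temporal stores (where the CPU supports them) so that a large,
 * read-once copy does not evict useful data from the cache.  The
 * caller is expected to have done access_ok() already.
 */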
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}
#endif /* _ASM_X86_UACCESS_32_H */