/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok() */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
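
/*
 * Like the routines above, copy_user_generic() returns the number of
 * bytes that could not be copied: 0 means the whole range made it.
 */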
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			   copy_user_generic_string,
			   X86_FEATURE_REP_GOOD,
			   copy_user_enhanced_fast_string,
			   X86_FEATURE_ERMS,
			   ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				       "=d" (len)),
			   "1" (to), "2" (from), "3" (len)
			   : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
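
/*
 * A minimal sketch of the dispatch above (illustration only;
 * alternative_call_2() patches the call target once during boot
 * instead of testing feature bits on every call):
 *
 *	if (static_cpu_has(X86_FEATURE_ERMS))
 *		ret = copy_user_enhanced_fast_string(to, from, len);
 *	else if (static_cpu_has(X86_FEATURE_REP_GOOD))
 *		ret = copy_user_generic_string(to, from, len);
 *	else
 *		ret = copy_user_generic_unrolled(to, from, len);
 */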
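
/*
 * raw_copy_{from,to}_user() are the arch back ends behind the generic
 * copy_from_user()/copy_to_user() helpers.  They return the number of
 * bytes left uncopied (0 on success) and assume the caller already
 * checked the range with access_ok().
 *
 * Callers normally go through the generic wrapper; a minimal sketch,
 * where my_args and uarg are hypothetical names:
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 */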
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
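
/*
 * raw_copy_in_user() copies directly from one user buffer to another;
 * it is the back end of the generic copy_in_user() helper used by,
 * e.g., compat code that shuffles data between user buffers.
 */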
static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
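
/*
 * Non-temporal copy: the destination is written with uncached stores
 * so a large copy does not evict useful data from the CPU caches.
 * Historically @zerorest selected whether the tail of @dst is zeroed
 * when a fault cuts the copy short; the wrapper below always passes 0.
 */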
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);
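
/*
 * The flushcache variants additionally flush the destination cache
 * lines after copying, so the data is durable in persistent memory;
 * this is what the pmem/DAX write paths rely on.
 */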
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);
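
/*
 * The explicit kasan_check_write() calls below are needed because the
 * actual copies are done by uninstrumented assembly, so KASAN would
 * otherwise never see the writes to @dst.
 */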
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
#endif /* _ASM_X86_UACCESS_64_H */