/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions. These should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

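/*
 * With CONFIG_UACCESS_MEMCPY a user pointer is dereferenced directly,
 * so the multi-byte cases below go through get_unaligned() and
 * put_unaligned(): the user-supplied pointer may not be naturally
 * aligned.
 */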
static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)
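/*
 * These helpers back copy_{from,to}_kernel_nofault(); the caller
 * supplies a label to jump to on a fault.  An illustrative sketch
 * (hypothetical caller, not part of this header):
 *
 *	long v;
 *
 *	__get_kernel_nofault(&v, src, long, Efault);
 *	return 0;
 * Efault:
 *	return -EFAULT;
 */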

#define HAVE_GET_KERNEL_NOFAULT 1

static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
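/*
 * Defining these lets linux/uaccess.h provide copy_{from,to}_user()
 * as inline wrappers around the raw helpers above, instead of the
 * out-of-line versions in lib/usercopy.c.
 */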
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

#ifdef CONFIG_SET_FS
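/*
 * set_fs()/get_fs() switch the per-thread address limit: USER_DS
 * confines uaccess to addresses below TASK_SIZE, while KERNEL_DS
 * lifts the limit so the same helpers accept kernel pointers.
 */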
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef uaccess_kernel
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif

#ifndef user_addr_max
#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
#endif

#endif /* CONFIG_SET_FS */

#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))

/*
 * The architecture should really override this if possible, at least
 * doing a check against the address limit returned by get_fs().
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif
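/*
 * An override would typically bound the whole range without
 * overflowing, e.g. (an illustrative sketch only, assuming
 * user_addr_max() is available):
 *
 *	static inline int __access_ok(unsigned long addr, unsigned long size)
 *	{
 *		return size <= user_addr_max() &&
 *		       addr <= user_addr_max() - size;
 *	}
 */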

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast path for small values.
 */
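/*
 * Example (illustrative only, not part of this header): a driver
 * ioctl handler moving one scalar each way, where "arg" is the
 * hypothetical user-supplied pointer argument:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)arg))
 *		return -EFAULT;
 */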
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __x = (x); \
	int __pu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(sizeof(*(ptr)), \
					 ptr, &__x); \
		break; \
	default: \
		__put_user_bad(); \
		break; \
	} \
	__pu_err; \
})

#define put_user(x, ptr) \
({ \
	void __user *__p = (ptr); \
	might_fault(); \
	access_ok(__p, sizeof(*(ptr))) ? \
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
		-EFAULT; \
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr) \
({ \
	int __gu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: { \
		unsigned char __x = 0; \
		__gu_err = __get_user_fn(sizeof(*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	} \
	case 2: { \
		unsigned short __x = 0; \
		__gu_err = __get_user_fn(sizeof(*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	} \
	case 4: { \
		unsigned int __x = 0; \
		__gu_err = __get_user_fn(sizeof(*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	} \
	case 8: { \
		unsigned long long __x = 0; \
		__gu_err = __get_user_fn(sizeof(*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	} \
	default: \
		__get_user_bad(); \
		break; \
	} \
	__gu_err; \
})

#define get_user(x, ptr) \
({ \
	const void __user *__p = (ptr); \
	might_fault(); \
	access_ok(__p, sizeof(*(ptr))) ? \
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) : \
		((x) = (__typeof__(*(ptr)))0, -EFAULT); \
})
238
Vineet Gupta05d88a42013-01-18 15:12:16 +0530239#ifndef __get_user_fn
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000240static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
241{
Al Virod5975802017-03-20 21:56:06 -0400242 return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000243}
244
Vineet Gupta05d88a42013-01-18 15:12:16 +0530245#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
246
247#endif
248
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000249extern int __get_user_bad(void) __attribute__((noreturn));
250
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000251/*
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000252 * Zero Userspace
253 */
254#ifndef __clear_user
255static inline __must_check unsigned long
256__clear_user(void __user *to, unsigned long n)
257{
258 memset((void __force *)to, 0, n);
259 return 0;
260}
261#endif
262
263static inline __must_check unsigned long
264clear_user(void __user *to, unsigned long n)
265{
Michael S. Tsirkine0acd0b2013-05-26 17:30:36 +0300266 might_fault();
Linus Torvalds96d4f262019-01-03 18:57:57 -0800267 if (!access_ok(to, n))
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000268 return n;
269
270 return __clear_user(to, n);
271}
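/*
 * clear_user() returns the number of bytes it could NOT zero, so a
 * nonzero result means failure.  Example (illustrative only; "buf",
 * "copied" and "len" are hypothetical locals): zeroing the uncopied
 * tail of a user buffer after a short copy:
 *
 *	if (clear_user(buf + copied, len - copied))
 *		return -EFAULT;
 */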

#include <asm/extable.h>

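/*
 * Generic string transfer helpers (typically the lib/ versions).
 * strncpy_from_user() returns the length of the copied string
 * (excluding the trailing NUL), @count if it had to truncate, or
 * -EFAULT on a fault; strnlen_user() returns the string length
 * including the NUL, a value above @n if the string is too long,
 * and 0 on a fault.
 */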
__must_check long strncpy_from_user(char *dst, const char __user *src,
		long count);
__must_check long strnlen_user(const char __user *src, long n);

#endif /* __ASM_GENERIC_UACCESS_H */