blob: 4973328f3c6e75ae04b010d10f7210d8c5279b1d [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Arnd Bergmanneed417d2009-05-13 22:56:37 +00002#ifndef __ASM_GENERIC_UACCESS_H
3#define __ASM_GENERIC_UACCESS_H
4
5/*
6 * User space memory access functions, these should work
Geert Uytterhoeven0a4a6642013-12-30 10:06:33 +01007 * on any machine that has kernel and user data in the same
Arnd Bergmanneed417d2009-05-13 22:56:37 +00008 * address space, e.g. all NOMMU machines.
9 */
Arnd Bergmanneed417d2009-05-13 22:56:37 +000010#include <linux/string.h>
11
Christoph Hellwigbd79f942019-04-23 18:38:08 +020012#ifdef CONFIG_UACCESS_MEMCPY
Christoph Hellwig931de112020-09-07 07:58:19 +020013#include <asm/unaligned.h>
14
/*
 * __get_user_fn() - fetch a single 1/2/4/8-byte value from user space.
 *
 * CONFIG_UACCESS_MEMCPY variant: user and kernel live in one address
 * space, so the value is read with a plain (alignment-safe) load.
 * @size must be a compile-time constant; anything but 1/2/4/8 is a
 * build error.  Always returns 0 (the access cannot fault here).
 */
static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	/* Reject callers that pass a runtime-variable size. */
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = get_unaligned((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		/* Unsupported width: fail the build. */
		BUILD_BUG();
		return 0;
	}

}
/* Signal to the generic __get_user() below that an override exists. */
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
40
/*
 * __put_user_fn() - store a single 1/2/4/8-byte value to user space.
 *
 * CONFIG_UACCESS_MEMCPY variant: shared address space, so the store is
 * a plain (alignment-safe) write.  @size must be a compile-time
 * constant of 1/2/4/8; always returns 0 (cannot fault here).
 */
static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	/* Reject callers that pass a runtime-variable size. */
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		put_unaligned(*(u8 *)from, (u8 __force *)to);
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		/* Unsupported width: fail the build. */
		BUILD_BUG();
		return 0;
	}
}
/* Signal to the generic __put_user() below that an override exists. */
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
65
/*
 * Non-faulting kernel read: a plain unaligned load in this
 * configuration, so err_label is never actually taken; the dead
 * "if (0) goto" merely keeps the caller's label referenced.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)
72
/*
 * Non-faulting kernel write: mirror of __get_kernel_nofault() above;
 * err_label is never taken, the dead goto only marks it as used.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

/* Advertise the nofault helpers to the generic uaccess code. */
#define HAVE_GET_KERNEL_NOFAULT 1
81
/*
 * raw_copy_from_user() - bulk copy from user space.
 * Shared address space, so this is just memcpy(); returns 0, i.e.
 * "no bytes left uncopied".
 */
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
88
/*
 * raw_copy_to_user() - bulk copy to user space.
 * Shared address space, so this is just memcpy(); returns 0, i.e.
 * "no bytes left uncopied".
 */
static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
/* Let the generic copy_{from,to}_user() wrappers inline these. */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */
98
#ifdef CONFIG_SET_FS
/* Build an mm_segment_t from a raw address-limit value. */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
/* Kernel segment: no limit on the addressable range. */
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
/* User segment: limited to the task's user address space. */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif
109
#ifndef get_fs
/* Current task's address-space limit, kept in thread_info by default. */
#define get_fs()	(current_thread_info()->addr_limit)

/* Switch the current task's uaccess limit (USER_DS/KERNEL_DS). */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif
118
#ifndef uaccess_kernel
/* True when the current limit equals the kernel segment. */
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif
#endif /* CONFIG_SET_FS */
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000123
/* Range check for user pointers; delegates to per-arch __access_ok(). */
#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000125
126/*
127 * The architecture should really override this if possible, at least
128 * doing a check on the get_fs()
129 */
#ifndef __access_ok
/* Default: accept every address range (suits NOMMU/shared space). */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif
136
137/*
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000138 * These are the main single-value transfer routines. They automatically
139 * use the right size if we just have the right pointer type.
140 * This version just falls back to copy_{from,to}_user, which should
141 * provide a fast-path for small values.
142 */
/*
 * __put_user() - write x to user space without an access_ok() check.
 * Evaluates x exactly once; returns 0 on success, -EFAULT on failure.
 * Sizes other than 1/2/4/8 are diagnosed via __put_user_bad().
 */
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __x = (x); \
	int __pu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		break; \
	default: \
		__put_user_bad(); \
		break; \
	} \
	__pu_err; \
})
162
/*
 * put_user() - checked single-value write to user space.
 * May sleep (might_fault()); validates the destination with
 * access_ok() first and returns -EFAULT for a rejected address.
 */
#define put_user(x, ptr)						\
({									\
	void __user *__p = (ptr);					\
	might_fault();							\
	access_ok(__p, sizeof(*ptr)) ?					\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;						\
})
171
#ifndef __put_user_fn

/* Generic fallback: a single-value put is just a small raw copy. */
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif
182
/* Diagnostic for unsupported __put_user() sizes (never returns). */
extern int __put_user_bad(void) __attribute__((noreturn));
184
/*
 * __get_user() - read a value from user space without an access_ok()
 * check.  Reads into a zero-initialized temporary of the matching
 * width, then casts to the target type, so x is untouched garbage-free
 * even if __get_user_fn() fails.  Returns 0 or -EFAULT; sizes other
 * than 1/2/4/8 are diagnosed via __get_user_bad().
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})
224
/*
 * get_user() - checked single-value read from user space.
 * May sleep (might_fault()); validates the source with access_ok().
 * On a rejected address, x is set to 0 and -EFAULT is returned.
 */
#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?				\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
})
233
#ifndef __get_user_fn
/* Generic fallback: a single-value get is just a small raw copy. */
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif
243
/* Diagnostic for unsupported __get_user() sizes (never returns). */
extern int __get_user_bad(void) __attribute__((noreturn));
245
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000246/*
247 * Copy a null terminated string from userspace.
248 */
249#ifndef __strncpy_from_user
250static inline long
251__strncpy_from_user(char *dst, const char __user *src, long count)
252{
253 char *tmp;
254 strncpy(dst, (const char __force *)src, count);
255 for (tmp = dst; *tmp && count > 0; tmp++, count--)
256 ;
257 return (tmp - dst);
258}
259#endif
260
261static inline long
262strncpy_from_user(char *dst, const char __user *src, long count)
263{
Linus Torvalds96d4f262019-01-03 18:57:57 -0800264 if (!access_ok(src, 1))
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000265 return -EFAULT;
266 return __strncpy_from_user(dst, src, count);
267}
268
269/*
270 * Return the size of a string (including the ending 0)
271 *
272 * Return 0 on exception, a value greater than N if too long
273 */
#ifndef __strnlen_user
/* Default: count with strnlen() and include the terminating NUL. */
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif
277
Mark Salter830f5802011-10-04 09:17:36 -0400278/*
279 * Unlike strnlen, strnlen_user includes the nul terminator in
280 * its returned count. Callers should check for a returned value
281 * greater than N as an indication the string is too long.
282 */
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000283static inline long strnlen_user(const char __user *src, long n)
284{
Linus Torvalds96d4f262019-01-03 18:57:57 -0800285 if (!access_ok(src, 1))
Mike Frysinger98448132009-06-14 02:00:02 -0400286 return 0;
GuanXuetao7f509a92011-01-15 18:08:09 +0800287 return __strnlen_user(src, n);
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000288}
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000289
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000290/*
291 * Zero Userspace
292 */
#ifndef __clear_user
/* Shared-address-space default: zeroing user memory is a memset(). */
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif
301
302static inline __must_check unsigned long
303clear_user(void __user *to, unsigned long n)
304{
Michael S. Tsirkine0acd0b2013-05-26 17:30:36 +0300305 might_fault();
Linus Torvalds96d4f262019-01-03 18:57:57 -0800306 if (!access_ok(to, n))
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000307 return n;
308
309 return __clear_user(to, n);
310}
311
Al Viroaaa2e7a2016-12-25 01:22:09 -0500312#include <asm/extable.h>
313
Arnd Bergmanneed417d2009-05-13 22:56:37 +0000314#endif /* __ASM_GENERIC_UACCESS_H */