/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)

static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define HAVE_GET_KERNEL_NOFAULT 1
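
/*
 * Illustrative sketch (not part of this header): the __get_kernel_nofault()
 * and __put_kernel_nofault() hooks above back the generic
 * get_kernel_nofault()/copy_from_kernel_nofault() helpers declared in
 * <linux/uaccess.h>.  A caller that must not fault on a bad kernel pointer
 * would use them roughly as below; the sample function name is hypothetical.
 */
#if 0
#include <linux/uaccess.h>

static unsigned long example_peek_kernel_word(const unsigned long *addr)
{
	unsigned long val;

	/* Evaluates to 0 on success, -EFAULT if the address would fault. */
	if (get_kernel_nofault(val, addr))
		return 0;

	return val;
}
#endif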
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */
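
/*
 * Illustrative sketch (not part of this header): raw_copy_{from,to}_user()
 * are only the backends; callers use the copy_from_user()/copy_to_user()
 * wrappers from <linux/uaccess.h>, which add the access_ok() check and
 * other hardening on top.  The struct and function names below are
 * hypothetical.
 */
#if 0
#include <linux/uaccess.h>

struct example_args {
	u32 in;
	u32 out;
};

static long example_handle_request(void __user *uptr)
{
	struct example_args args;

	/* Both wrappers return the number of bytes that were NOT copied. */
	if (copy_from_user(&args, uptr, sizeof(args)))
		return -EFAULT;

	args.out = args.in * 2;

	if (copy_to_user(uptr, &args, sizeof(args)))
		return -EFAULT;

	return 0;
}
#endif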

#ifdef CONFIG_SET_FS
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef uaccess_kernel
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif

#ifndef user_addr_max
#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
#endif

#endif /* CONFIG_SET_FS */

#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))

/*
 * The architecture should really override this if possible, at least
 * doing a range check against the get_fs() limit; a sketch of such an
 * override follows below.
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif
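
/*
 * Illustrative sketch (not part of this header): an architecture that keeps
 * an address limit could override the permissive default above with an
 * overflow-safe range check against get_fs(), roughly as follows.  This is
 * only an assumption about what such an override might look like.
 */
#if 0
#define __access_ok __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	unsigned long limit = get_fs().seg;

	/* Reject ranges that wrap around or run past the limit. */
	return size <= limit && addr <= limit - size;
}
#endif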

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?				\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?				\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) : \
		((x) = (__typeof__(*(ptr)))0, -EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));
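
/*
 * Illustrative sketch (not part of this header): get_user()/put_user()
 * transfer one value and return 0 or -EFAULT, so a typical caller simply
 * propagates the error.  The function and variable names are hypothetical.
 */
#if 0
static long example_double_value(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* includes the access_ok() check */
		return -EFAULT;

	if (put_user(val * 2, uptr))
		return -EFAULT;

	return 0;
}
#endif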

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}
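
/*
 * Illustrative sketch (not part of this header): clear_user() returns the
 * number of bytes it could NOT zero, so a common pattern is padding the
 * unwritten tail of a user buffer.  The function name and parameters are
 * hypothetical.
 */
#if 0
static long example_pad_with_zeroes(char __user *buf, size_t filled,
				    size_t len)
{
	if (filled < len && clear_user(buf + filled, len - filled))
		return -EFAULT;

	return filled;
}
#endif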

#include <asm/extable.h>

__must_check long strncpy_from_user(char *dst, const char __user *src,
		long count);
__must_check long strnlen_user(const char __user *src, long n);

#endif /* __ASM_GENERIC_UACCESS_H */