/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user() the destination (to) always points to kernel
 * memory, and no faults on store should happen.  Interpretation of from is
 * affected by set_fs().  For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of a short
 * copy.  Neither __copy_from_user() nor __copy_from_user_inatomic() zero
 * anything at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
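
/*
 * Illustrative sketch only, not part of this header: on a hypothetical
 * architecture where kernel and user share one address space (e.g. a
 * no-MMU setup), the primitives can degenerate to memcpy() and report
 * that nothing was left to copy:
 *
 *	static inline unsigned long
 *	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		memcpy(to, (const void __force *)from, n);
 *		return 0;
 *	}
 *
 * MMU-based implementations instead need exception-table fixups so that
 * a faulting access returns the number of bytes left, per the contract
 * described above.
 */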

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  The caller must check the
 * specified block with access_ok() before calling this function, and
 * should also make sure the user space pages are pinned so that the copy
 * cannot take a page fault and sleep.
 */
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
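
/*
 * Illustrative sketch only (hypothetical caller): the _inatomic variants
 * are meant for contexts that cannot sleep, with page faults disabled.
 * A typical pattern tries the atomic copy first and falls back to a
 * sleeping copy on failure:
 *
 *	pagefault_disable();
 *	left = __copy_to_user_inatomic(uaddr, kbuf, len);
 *	pagefault_enable();
 *	if (left)
 *		left = copy_to_user(uaddr, kbuf, len);	// may sleep
 */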

static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
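
/*
 * Illustrative sketch only (hypothetical driver code): the canonical
 * calling pattern treats any nonzero return as a failed copy:
 *
 *	struct foo_params params;	// hypothetical structure
 *
 *	if (copy_from_user(&params, argp, sizeof(params)))
 *		return -EFAULT;
 *	// ... operate on params ...
 *	if (copy_to_user(argp, &params, sizeof(params)))
 *		return -EFAULT;
 */
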
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
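
/*
 * Illustrative sketch only: a caller that must not sleep can bracket an
 * atomic user access with these helpers; with faults disabled, a failed
 * access returns an error instead of sleeping in the fault handler:
 *
 *	pagefault_disable();
 *	ret = __get_user(val, uptr);	// -EFAULT on fault, never sleeps
 *	pagefault_enable();
 */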

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, preempt_disable() is a NOP, so the handler would
 * not actually be disabled, and in_atomic() will report different values
 * depending on CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
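
/*
 * Illustrative sketch only (hypothetical arch fault handler): when the
 * fault handler is disabled, skip the locking paths and go straight to
 * exception fixup:
 *
 *	if (faulthandler_disabled() || !current->mm)
 *		return do_fixup(regs, addr);	// hypothetical helper
 */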

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
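
/*
 * Illustrative sketch only: reading through a possibly-invalid kernel
 * pointer without risking an oops, e.g. from a debugging path:
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_read(&val, maybe_bad_ptr, sizeof(val)))
 *		pr_warn("%p is not readable\n", maybe_bad_ptr);
 */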

/*
 * probe_user_read(): safely attempt to read from a location in user space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from user address @src to the buffer at @dst.  If a kernel
 * fault happens, handle that and return -EFAULT.
 */
extern long probe_user_read(void *dst, const void __user *src, size_t size);
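
/*
 * Illustrative sketch only: unlike copy_from_user(), this can be called
 * where taking a page fault is not allowed, e.g. from a tracing callback;
 * unreadable user memory yields -EFAULT rather than a sleep:
 *
 *	u64 arg;
 *
 *	if (probe_user_read(&arg, (const void __user *)uaddr, sizeof(arg)))
 *		arg = 0;	// treat unreadable memory as zero
 */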

/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
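
/*
 * Illustrative sketch only (hypothetical debugger code): writing to a
 * kernel address that may be unmapped, without oopsing on failure:
 *
 *	if (probe_kernel_write(addr, &insn, sizeof(insn)))
 *		return -EFAULT;	// address was not writable
 */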

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
				     long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))
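
/*
 * Illustrative sketch only: probe_kernel_address() infers the access size
 * from the variable, which keeps dereferences of suspect pointers compact:
 *
 *	unsigned char insn;
 *
 *	if (probe_kernel_address(ip, insn))
 *		return -EFAULT;
 */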

#ifndef user_access_begin
#define user_access_begin(ptr, len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
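
/*
 * Illustrative sketch only: the unsafe_*_user() helpers amortize a single
 * user_access_begin() check over several accesses; the shape is always
 * begin / access / end, with a goto-based error path that must still run
 * user_access_end():
 *
 *	if (!user_access_begin(uaddr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(lo, &uaddr[0], efault);
 *	unsafe_get_user(hi, &uaddr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */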

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif	/* __LINUX_UACCESS_H__ */