#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#include <asm/uaccess.h>

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
	WARN_ON(current->pagefault_disabled < 0);
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and will go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)
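/*
 * Illustrative sketch (not part of this header): a non-sleeping peek at user
 * memory. The surrounding code and fallback policy are assumptions for the
 * example; the point is that __copy_from_user_inatomic() between
 * pagefault_disable()/pagefault_enable() fails instead of sleeping when the
 * page is not resident:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(&val, uaddr, sizeof(val));
 *	pagefault_enable();
 *	if (ret)
 *		...fall back to a sleeping copy_from_user() or bail out...
 */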

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler: with
 * !CONFIG_PREEMPT_COUNT it is effectively a NOP, so the handler won't actually
 * be disabled, and in_atomic() will report different values depending on
 * whether CONFIG_PREEMPT_COUNT is set.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
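/*
 * Sketch of the intended use in an architecture's page fault handler
 * (simplified from the common pattern, not a drop-in implementation): bail
 * out early and let the exception fixup table resolve the access when faults
 * must not be handled:
 *
 *	if (faulthandler_disabled() || !current->mm)
 *		goto no_context;
 */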

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
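/*
 * Example (sketch): reading a word through a pointer that may no longer be
 * mapped, e.g. while dumping diagnostic state. Names are illustrative only:
 *
 *	unsigned long word;
 *
 *	if (probe_kernel_read(&word, ptr, sizeof(word)))
 *		pr_warn("cannot read %p\n", ptr);
 */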

/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
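/*
 * Example (sketch): patching a kernel object that might have been freed or
 * unmapped, failing gracefully instead of oopsing (names are illustrative):
 *
 *	if (probe_kernel_write(dst, &insn, sizeof(insn)))
 *		return -EFAULT;
 */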

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))
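/*
 * Example (sketch): probe_kernel_address() infers the size from @retval, so
 * a typed local is enough (as in instruction decoders; names illustrative):
 *
 *	unsigned short opcode;
 *
 *	if (probe_kernel_address(ip, opcode))
 *		return;
 */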

#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
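/*
 * Sketch of the intended calling pattern (assuming the three-argument
 * access_ok() that matches the VERIFY_* flags above): batch the expensive
 * user-access enable/disable around several accesses and branch to a label
 * on fault:
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	user_access_begin();
 *	unsafe_get_user(lo, &uptr[0], efault);
 *	unsafe_get_user(hi, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */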

#endif		/* __LINUX_UACCESS_H__ */