#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory, so no faults
 * on store should happen; interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of a short
 * copy.  Neither __copy_from_user() nor __copy_from_user_inatomic() zero
 * anything at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

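/*
 * Illustration only, not part of the original header: a minimal,
 * hypothetical arch-side raw_copy_from_user() honouring the contract
 * above.  It copies byte by byte via __get_user() and returns the amount
 * left uncopied on the first fault; real architectures use much faster
 * word-at-a-time code.  The name is made up for this sketch.
 */
static inline unsigned long
example_raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	char *dst = to;
	const char __user *src = from;

	while (n) {
		char c;

		/* access_ok() was already done by the caller */
		if (__get_user(c, src))
			break;		/* fault: report what is left */
		*dst++ = c;
		src++;
		n--;
	}
	return n;			/* 0 means everything was copied */
}
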
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't end up taking a page fault and sleeping.
 */
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

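/*
 * Illustration only, not part of the original header: the double-underscore
 * variants skip access_ok(), so the caller does that check itself, typically
 * once for a whole batch of accesses.  Helper name is hypothetical.
 */
static inline long example_put_two_ints(int __user *uptr, int a, int b)
{
	if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(int)))
		return -EFAULT;
	if (__copy_to_user(uptr, &a, sizeof(a)))
		return -EFAULT;		/* short copy: report failure */
	if (__copy_to_user(uptr + 1, &b, sizeof(b)))
		return -EFAULT;
	return 0;
}
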
#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
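
/*
 * Illustration only, not part of the original header: typical use of
 * copy_{to,from}_user() in a hypothetical ioctl-style handler.  Both
 * return the number of bytes that could NOT be copied, so any non-zero
 * result is reported as -EFAULT.  Struct and function names are made up.
 */
struct example_args {
	int in;
	int out;
};

static inline long example_handle_args(void __user *uarg)
{
	struct example_args args;

	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;
	args.out = args.in * 2;		/* some trivial work */
	if (copy_to_user(uarg, &args, sizeof(args)))
		return -EFAULT;
	return 0;
}
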
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

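/*
 * Illustration only, not part of the original header: a hypothetical helper
 * that copies from user space in a context that must not sleep.  With page
 * faults disabled, __copy_from_user_inatomic() fails instead of faulting the
 * page in, so the caller must cope with a short copy.  Assumes access_ok()
 * was already checked on src.
 */
static inline unsigned long example_copy_nofault(void *dst,
						 const void __user *src,
						 unsigned long size)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, size);
	pagefault_enable();
	return left;	/* 0 on success, bytes left uncopied otherwise */
}
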
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This macro should only be used by the fault handlers; other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler: with
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler would not actually
 * be disabled, and in_atomic() will report different values depending on
 * !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);

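/*
 * Illustration only, not part of the original header: peeking at a word
 * through a possibly bogus kernel pointer, as debugging or tracing code
 * might.  A fault becomes -EFAULT instead of an oops.  Name is made up.
 */
static inline long example_peek_kernel_long(const void *addr, long *val)
{
	return probe_kernel_read(val, addr, sizeof(*val));
}
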
/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

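/*
 * Illustration only, not part of the original header: the write-side
 * counterpart - patching an int through a suspect pointer and getting
 * -EFAULT rather than an oops on failure.  Name is made up.
 */
static inline long example_poke_kernel_int(int *addr, int val)
{
	return probe_kernel_write(addr, &val, sizeof(val));
}
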
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))

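/*
 * Illustration only, not part of the original header: dereferencing a
 * suspect pointer via probe_kernel_address(), the way dump/debug code
 * does.  Name is made up.
 */
static inline int example_deref_suspect(unsigned long *ptr, unsigned long *out)
{
	unsigned long word;

	if (probe_kernel_address(ptr, word))
		return -EFAULT;		/* not a readable kernel address */
	*out = word;
	return 0;
}
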
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
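
/*
 * Illustration only, not part of the original header: the intended calling
 * pattern for the unsafe accessors - access_ok() first, then a
 * user_access_begin()/user_access_end() section whose error label also
 * ends the section.  Names are hypothetical.
 */
static inline long example_get_two_words(u32 __user *uptr, u32 *a, u32 *b)
{
	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
		return -EFAULT;
	user_access_begin();
	unsafe_get_user(*a, uptr, efault);
	unsafe_get_user(*b, uptr + 1, efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}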

#endif		/* __LINUX_UACCESS_H__ */