// SPDX-License-Identifier: GPL-2.0-only
/*
 * Access kernel or user memory without faulting.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
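
/*
 * Architectures can override this weak hook to reject source ranges that are
 * not sane kernel addresses; the kernel-access helpers below then fail with
 * -ERANGE instead of attempting the copy.
 */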
bool __weak probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
	return true;
}

#ifdef HAVE_GET_KERNEL_NOFAULT
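
/*
 * Copy chunks of sizeof(type) bytes while at least that many bytes remain,
 * branching to err_label on the first faulting access.  Invoked below with
 * progressively smaller types so most of a copy is done with wide accesses.
 */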
#define probe_kernel_read_loop(dst, src, len, type, err_label)		\
	while (len >= sizeof(type)) {					\
		__get_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

long probe_kernel_read(void *dst, const void *src, size_t size)
{
	if (!probe_kernel_read_allowed(src, size))
		return -ERANGE;

	pagefault_disable();
	probe_kernel_read_loop(dst, src, size, u64, Efault);
	probe_kernel_read_loop(dst, src, size, u32, Efault);
	probe_kernel_read_loop(dst, src, size, u16, Efault);
	probe_kernel_read_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(probe_kernel_read);
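
/*
 * Store-side counterpart of probe_kernel_read_loop(): write sizeof(type)
 * sized chunks until fewer than sizeof(type) bytes remain, branching to
 * err_label on a fault.
 */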
#define probe_kernel_write_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__put_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

long probe_kernel_write(void *dst, const void *src, size_t size)
{
	pagefault_disable();
	probe_kernel_write_loop(dst, src, size, u64, Efault);
	probe_kernel_write_loop(dst, src, size, u32, Efault);
	probe_kernel_write_loop(dst, src, size, u16, Efault);
	probe_kernel_write_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
	const void *src = unsafe_addr;

	if (unlikely(count <= 0))
		return 0;
	if (!probe_kernel_read_allowed(unsafe_addr, count))
		return -ERANGE;

	pagefault_disable();
	do {
		__get_kernel_nofault(dst, src, u8, Efault);
		dst++;
		src++;
	} while (dst[-1] && src - unsafe_addr < count);
	pagefault_enable();

	dst[-1] = '\0';
	return src - unsafe_addr;
Efault:
	pagefault_enable();
	/* The faulting access did not advance dst, so terminate in place. */
	dst[0] = '\0';
	return -EFAULT;
}
#else /* HAVE_GET_KERNEL_NOFAULT */
/**
 * probe_kernel_read(): safely attempt to read from kernel-space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from kernel address @src to the buffer at @dst.  If a kernel
 * fault happens, handle that and return -EFAULT.  If @src is not a valid
 * kernel address, return -ERANGE.
 *
 * We ensure that the copy is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_lock.  This makes
 * probe_kernel_read() suitable for use within regions where the caller
 * already holds mmap_lock, or other locks which nest inside mmap_lock.
 */
long probe_kernel_read(void *dst, const void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	if (!probe_kernel_read_allowed(src, size))
		return -ERANGE;

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, (__force const void __user *)src,
			size);
	pagefault_enable();
	set_fs(old_fs);

	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(probe_kernel_read);
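
/*
 * Illustrative usage sketch (not part of this file): a probe or tracer
 * reading a kernel variable from atomic context, where a faulting access
 * must not be taken.  "trace_target" is a hypothetical variable name.
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_read(&val, &trace_target, sizeof(val)))
 *		val = 0;
 */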

/**
 * probe_kernel_write(): safely attempt to write to a kernel-space location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to kernel address @dst from the buffer at @src.  If a kernel
 * fault happens, handle that and return -EFAULT.
 */
long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
	pagefault_enable();
	set_fs(old_fs);

	if (ret)
		return -EFAULT;
	return 0;
}
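
/*
 * Illustrative usage sketch (not part of this file): poking a value through
 * a kernel pointer that may be bogus, e.g. from a debugger, without risking
 * an unhandled fault.  "target" is a hypothetical pointer.
 *
 *	u32 val = 0;
 *
 *	if (probe_kernel_write(target, &val, sizeof(val)))
 *		pr_warn("target address not writable\n");
 */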

/**
 * strncpy_from_kernel_nofault: - Copy a NUL-terminated string from an unsafe
 *				kernel address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe kernel address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from an unsafe kernel address to a kernel
 * buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied and the
 * trailing NUL added).  If @unsafe_addr is not a valid kernel address, return
 * -ERANGE.
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
	mm_segment_t old_fs = get_fs();
	const void *src = unsafe_addr;
	long ret;

	if (unlikely(count <= 0))
		return 0;
	if (!probe_kernel_read_allowed(unsafe_addr, count))
		return -ERANGE;

	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __get_user(*dst++, (const char __user __force *)src++);
	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);

	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : src - unsafe_addr;
}
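
/*
 * Illustrative usage sketch (not part of this file): copying a name string
 * through an untrusted kernel pointer, e.g. from a tracing probe.
 * "name_ptr" is a hypothetical pointer.
 *
 *	char buf[64];
 *	long len = strncpy_from_kernel_nofault(buf, name_ptr, sizeof(buf));
 *
 *	if (len < 0)
 *		buf[0] = '\0';
 */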
#endif /* HAVE_GET_KERNEL_NOFAULT */

/**
 * probe_user_read(): safely attempt to read from a user-space location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from. This must be a user address.
 * @size: size of the data chunk
 *
 * Safely read from user address @src to the buffer at @dst. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long probe_user_read(void *dst, const void __user *src, size_t size)
{
	long ret = -EFAULT;
	mm_segment_t old_fs = get_fs();

	set_fs(USER_DS);
	if (access_ok(src, size)) {
		pagefault_disable();
		ret = __copy_from_user_inatomic(dst, src, size);
		pagefault_enable();
	}
	set_fs(old_fs);

	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(probe_user_read);
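
/*
 * Illustrative usage sketch (not part of this file): peeking at user memory
 * from a kprobe or other atomic context where a sleeping fault cannot be
 * taken.  "user_ptr" is a hypothetical __user pointer.
 *
 *	u32 word;
 *
 *	if (probe_user_read(&word, user_ptr, sizeof(word)))
 *		word = 0;
 */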

/**
 * probe_user_write(): safely attempt to write to a user-space location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long probe_user_write(void __user *dst, const void *src, size_t size)
{
	long ret = -EFAULT;
	mm_segment_t old_fs = get_fs();

	set_fs(USER_DS);
	if (access_ok(dst, size)) {
		pagefault_disable();
		ret = __copy_to_user_inatomic(dst, src, size);
		pagefault_enable();
	}
	set_fs(old_fs);

	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(probe_user_write);
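
/*
 * Illustrative usage sketch (not part of this file): writing a result back
 * to user memory from a context that must not sleep on a fault.  "user_ptr"
 * is a hypothetical __user pointer.
 *
 *	u64 val = 42;
 *
 *	if (probe_user_write(user_ptr, &val, sizeof(val)))
 *		return -EFAULT;
 */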

/**
 * strncpy_from_user_nofault: - Copy a NUL-terminated string from an unsafe
 *				user address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe user address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from an unsafe user address to a kernel
 * buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
			      long count)
{
	mm_segment_t old_fs = get_fs();
	long ret;

	if (unlikely(count <= 0))
		return 0;

	set_fs(USER_DS);
	pagefault_disable();
	ret = strncpy_from_user(dst, unsafe_addr, count);
	pagefault_enable();
	set_fs(old_fs);

	if (ret >= count) {
		ret = count;
		dst[ret - 1] = '\0';
	} else if (ret > 0) {
		ret++;
	}

	return ret;
}
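
/*
 * Illustrative usage sketch (not part of this file): grabbing a user-supplied
 * string, e.g. a filename argument observed from a tracepoint, into a fixed
 * kernel buffer.  "uname" is a hypothetical __user pointer.
 *
 *	char buf[128];
 *	long len = strncpy_from_user_nofault(buf, uname, sizeof(buf));
 *
 *	if (len < 0)
 *		buf[0] = '\0';
 */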

/**
 * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
 * @unsafe_addr: The string to measure.
 * @count: Maximum count (including NUL)
 *
 * Get the size of a NUL-terminated string in user space without taking a
 * page fault.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 *
 * If the string is too long, returns a number larger than @count.  The caller
 * has to check the return value against "> count".
 * On exception (or invalid count), returns 0.
 *
 * Unlike strnlen_user(), this can be used from IRQ handlers etc. because
 * it disables page faults.
 */
long strnlen_user_nofault(const void __user *unsafe_addr, long count)
{
	mm_segment_t old_fs = get_fs();
	int ret;

	set_fs(USER_DS);
	pagefault_disable();
	ret = strnlen_user(unsafe_addr, count);
	pagefault_enable();
	set_fs(old_fs);

	return ret;
}
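
/*
 * Illustrative usage sketch (not part of this file): sizing a user string
 * before deciding how much of it to copy, from a context that must not
 * fault.  "uname" is a hypothetical __user pointer and "MAXLEN" a
 * hypothetical limit.
 *
 *	long len = strnlen_user_nofault(uname, MAXLEN);
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > MAXLEN)
 *		return -ENAMETOOLONG;
 */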