// SPDX-License-Identifier: GPL-2.0-only
/*
 * Access kernel or user memory without faulting.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

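/*
 * Architectures can override this weak hook to reject ranges that must not
 * be probed (for example user addresses when a kernel address is expected);
 * returning false makes the kernel-space accessors below fail with -ERANGE.
 */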
bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
                size_t size)
{
        return true;
}

#ifdef HAVE_GET_KERNEL_NOFAULT

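/*
 * Copy @len bytes in sizeof(@type) sized steps; a faulting access inside
 * __get_kernel_nofault() branches to @err_label with @len still holding the
 * number of bytes that remain to be copied.
 */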
#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)  \
        while (len >= sizeof(type)) {                                   \
                __get_kernel_nofault(dst, src, type, err_label);        \
                dst += sizeof(type);                                    \
                src += sizeof(type);                                    \
                len -= sizeof(type);                                    \
        }

long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
        unsigned long align = 0;

        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
                align = (unsigned long)dst | (unsigned long)src;

        if (!copy_from_kernel_nofault_allowed(src, size))
                return -ERANGE;

        pagefault_disable();
        if (!(align & 7))
                copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
        if (!(align & 3))
                copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
        if (!(align & 1))
                copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
        copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
        pagefault_enable();
        return 0;
Efault:
        pagefault_enable();
        return -EFAULT;
}
EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
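
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * tracer- or debugger-style helper that peeks one word at an arbitrary
 * kernel address.  The helper name is hypothetical; the result is 0 on
 * success, -ERANGE for a disallowed address and -EFAULT on a fault.
 *
 *      static long peek_kernel_word(const void *addr, unsigned long *val)
 *      {
 *              return copy_from_kernel_nofault(val, addr, sizeof(*val));
 *      }
 */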

#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)    \
        while (len >= sizeof(type)) {                                   \
                __put_kernel_nofault(dst, src, type, err_label);        \
                dst += sizeof(type);                                    \
                src += sizeof(type);                                    \
                len -= sizeof(type);                                    \
        }

long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
        unsigned long align = 0;

        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
                align = (unsigned long)dst | (unsigned long)src;

        pagefault_disable();
        if (!(align & 7))
                copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
        if (!(align & 3))
                copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
        if (!(align & 1))
                copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
        copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
        pagefault_enable();
        return 0;
Efault:
        pagefault_enable();
        return -EFAULT;
}
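
/*
 * Usage sketch (illustrative only, not part of the original file): poking a
 * single byte at a kernel address that may not be mapped writable, in the
 * style of a kernel debugger.  The helper name is hypothetical; the result
 * is 0 on success or -EFAULT if the write faulted.
 *
 *      static long poke_kernel_byte(void *addr, u8 val)
 *      {
 *              return copy_to_kernel_nofault(addr, &val, sizeof(val));
 *      }
 */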

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
        const void *src = unsafe_addr;

        if (unlikely(count <= 0))
                return 0;
        if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
                return -ERANGE;

        pagefault_disable();
        do {
                __get_kernel_nofault(dst, src, u8, Efault);
                dst++;
                src++;
        } while (dst[-1] && src - unsafe_addr < count);
        pagefault_enable();

        dst[-1] = '\0';
        return src - unsafe_addr;
Efault:
        pagefault_enable();
        dst[-1] = '\0';
        return -EFAULT;
}
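
/*
 * Usage sketch (illustrative only, not part of the original file): copying a
 * possibly-bogus NUL-terminated kernel string into a fixed-size buffer, for
 * example when formatting trace output.  The names and buffer size are
 * hypothetical.
 *
 *      char name[64];
 *      long len = strncpy_from_kernel_nofault(name, unsafe_name, sizeof(name));
 *
 *      if (len < 0)
 *              strscpy(name, "(fault)", sizeof(name));
 */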
#else /* HAVE_GET_KERNEL_NOFAULT */
/**
 * copy_from_kernel_nofault(): safely attempt to read from kernel-space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from kernel address @src to the buffer at @dst. If a kernel
 * fault happens, handle that and return -EFAULT. If @src is not a valid kernel
 * address, return -ERANGE.
 *
 * We ensure that the copy is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_lock. This makes
 * copy_from_kernel_nofault() suitable for use within regions where the caller
 * already holds mmap_lock, or other locks which nest inside mmap_lock.
 */
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
        long ret;
        mm_segment_t old_fs = get_fs();

        if (!copy_from_kernel_nofault_allowed(src, size))
                return -ERANGE;

        set_fs(KERNEL_DS);
        pagefault_disable();
        ret = __copy_from_user_inatomic(dst, (__force const void __user *)src,
                        size);
        pagefault_enable();
        set_fs(old_fs);

        if (ret)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);

/**
 * copy_to_kernel_nofault(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
        long ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        pagefault_disable();
        ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
        pagefault_enable();
        set_fs(old_fs);

        if (ret)
                return -EFAULT;
        return 0;
}

/**
 * strncpy_from_kernel_nofault: - Copy a NUL terminated string from unsafe
 *                               address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied and the
 * trailing NUL added). If @unsafe_addr is not a valid kernel address, return
 * -ERANGE.
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
        mm_segment_t old_fs = get_fs();
        const void *src = unsafe_addr;
        long ret;

        if (unlikely(count <= 0))
                return 0;
        if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
                return -ERANGE;

        set_fs(KERNEL_DS);
        pagefault_disable();

        do {
                ret = __get_user(*dst++, (const char __user __force *)src++);
        } while (dst[-1] && ret == 0 && src - unsafe_addr < count);

        dst[-1] = '\0';
        pagefault_enable();
        set_fs(old_fs);

        return ret ? -EFAULT : src - unsafe_addr;
}
#endif /* HAVE_GET_KERNEL_NOFAULT */

/**
 * copy_from_user_nofault(): safely attempt to read from a user-space location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from. This must be a user address.
 * @size: size of the data chunk
 *
 * Safely read from user address @src to the buffer at @dst. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
{
        long ret = -EFAULT;
        mm_segment_t old_fs = force_uaccess_begin();

        if (access_ok(src, size)) {
                pagefault_disable();
                ret = __copy_from_user_inatomic(dst, src, size);
                pagefault_enable();
        }
        force_uaccess_end(old_fs);

        if (ret)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(copy_from_user_nofault);
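
/*
 * Usage sketch (illustrative only, not part of the original file): reading a
 * word from a user address in a context that must not sleep or fault, such
 * as a tracing callback.  The names are hypothetical; unreadable memory is
 * simply reported as zero.
 *
 *      unsigned long word;
 *
 *      if (copy_from_user_nofault(&word, user_ptr, sizeof(word)))
 *              word = 0;
 */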

/**
 * copy_to_user_nofault(): safely attempt to write to a user-space location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long copy_to_user_nofault(void __user *dst, const void *src, size_t size)
{
        long ret = -EFAULT;
        mm_segment_t old_fs = force_uaccess_begin();

        if (access_ok(dst, size)) {
                pagefault_disable();
                ret = __copy_to_user_inatomic(dst, src, size);
                pagefault_enable();
        }
        force_uaccess_end(old_fs);

        if (ret)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(copy_to_user_nofault);
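
/*
 * Usage sketch (illustrative only, not part of the original file): writing a
 * value back to user memory from a non-sleeping context.  The names are
 * hypothetical; on failure the caller learns nothing about how much, if
 * anything, was written.
 *
 *      if (copy_to_user_nofault(user_ptr, &value, sizeof(value)))
 *              return -EFAULT;
 */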

/**
 * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
 *                              address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe user address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe user address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
                long count)
{
        mm_segment_t old_fs;
        long ret;

        if (unlikely(count <= 0))
                return 0;

        old_fs = force_uaccess_begin();
        pagefault_disable();
        ret = strncpy_from_user(dst, unsafe_addr, count);
        pagefault_enable();
        force_uaccess_end(old_fs);

        if (ret >= count) {
                ret = count;
                dst[ret - 1] = '\0';
        } else if (ret > 0) {
                ret++;
        }

        return ret;
}
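
/*
 * Usage sketch (illustrative only, not part of the original file): grabbing a
 * short string such as a filename from user space inside a probe handler
 * where faulting is not allowed.  The names and buffer size are hypothetical.
 *
 *      char buf[128];
 *      long len = strncpy_from_user_nofault(buf, user_str, sizeof(buf));
 *
 *      if (len < 0)
 *              return len;
 */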

/**
 * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
 * @unsafe_addr: The string to measure.
 * @count: Maximum count (including NUL)
 *
 * Get the size of a NUL-terminated string in user space without taking a
 * page fault.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 *
 * If the string is too long, returns a number larger than @count. The caller
 * has to check the return value against "> count".
 * On exception (or invalid count), returns 0.
 *
 * Unlike strnlen_user, this can be used from an IRQ handler and similar
 * contexts because it disables page faults.
 */
long strnlen_user_nofault(const void __user *unsafe_addr, long count)
{
        mm_segment_t old_fs;
        int ret;

        old_fs = force_uaccess_begin();
        pagefault_disable();
        ret = strnlen_user(unsafe_addr, count);
        pagefault_enable();
        force_uaccess_end(old_fs);

        return ret;
}
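
/*
 * Usage sketch (illustrative only, not part of the original file): sizing a
 * user string before deciding how much to copy, from a context where page
 * faults are not acceptable.  The names are hypothetical.
 *
 *      long len = strnlen_user_nofault(user_str, max_len);
 *
 *      if (len == 0 || len > max_len)
 *              return -EFAULT;
 */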