blob: 3bd70405f2d848d15ee90897e7e267eb3de2ebda [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02002/*
Christoph Hellwig3f0acb12020-06-08 21:34:11 -07003 * Access kernel or user memory without faulting.
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02004 */
Paul Gortmakerb95f1b312011-10-16 02:01:52 -04005#include <linux/export.h>
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02006#include <linux/mm.h>
David Howells7c7fcf72010-10-27 17:29:01 +01007#include <linux/uaccess.h>
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02008
/*
 * Arch-overridable (__weak) policy hook: may @size bytes starting at
 * @unsafe_src be read with copy_from_kernel_nofault()?  The default
 * permits everything; architectures override this to reject ranges
 * (e.g. user addresses), which makes the callers return -ERANGE.
 */
bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
		size_t size)
{
	return true;
}
14
#ifdef HAVE_GET_KERNEL_NOFAULT

/*
 * Copy whole 'type'-sized chunks from kernel address 'src' to 'dst' while
 * at least sizeof(type) bytes remain in 'len', branching to 'err_label' on
 * a faulting access.  Invoked below with u64, u32, u16 and finally u8 so
 * the bulk of the copy uses the widest access and the tail is finished
 * with progressively smaller ones.
 */
#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__get_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
24
/**
 * copy_from_kernel_nofault(): safely attempt to read from kernel-space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from kernel address @src to the buffer at @dst.  If a kernel
 * fault happens, handle that and return -EFAULT.  If @src is not a valid
 * kernel address (per copy_from_kernel_nofault_allowed()), return -ERANGE.
 * Returns 0 on success.
 */
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
	if (!copy_from_kernel_nofault_allowed(src, size))
		return -ERANGE;

	/* Faults land in __get_kernel_nofault's fixup and jump to Efault. */
	pagefault_disable();
	copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
	copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
	copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
Christoph Hellwigb58294e2020-06-08 21:34:58 -070042
/*
 * Write counterpart of copy_from_kernel_nofault_loop(): store whole
 * 'type'-sized chunks from 'src' to kernel address 'dst' while at least
 * sizeof(type) bytes remain in 'len', branching to 'err_label' on a
 * faulting access.
 */
#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__put_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
50
/**
 * copy_to_kernel_nofault(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.  Returns 0 on success.
 *
 * Note: unlike the read side there is no *_allowed() filter here; only
 * reads are range-checked.
 */
long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
	pagefault_disable();
	copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
	copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
	copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
64
65long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
66{
67 const void *src = unsafe_addr;
68
69 if (unlikely(count <= 0))
70 return 0;
Christoph Hellwigfe557312020-06-17 09:37:53 +020071 if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
Christoph Hellwig2a71e812020-06-08 21:35:04 -070072 return -ERANGE;
Christoph Hellwigb58294e2020-06-08 21:34:58 -070073
74 pagefault_disable();
75 do {
76 __get_kernel_nofault(dst, src, u8, Efault);
77 dst++;
78 src++;
79 } while (dst[-1] && src - unsafe_addr < count);
80 pagefault_enable();
81
82 dst[-1] = '\0';
83 return src - unsafe_addr;
84Efault:
85 pagefault_enable();
86 dst[-1] = '\0';
87 return -EFAULT;
88}
#else /* HAVE_GET_KERNEL_NOFAULT */
/**
 * copy_from_kernel_nofault(): safely attempt to read from kernel-space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from kernel address @src to the buffer at @dst. If a kernel
 * fault happens, handle that and return -EFAULT. If @src is not a valid kernel
 * address, return -ERANGE.
 *
 * We ensure that the copy_from_user is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_lock. This makes
 * copy_from_kernel_nofault() suitable for use within regions where the caller
 * already holds mmap_lock, or other locks which nest inside mmap_lock.
 */
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
	long ret;
	/* Save the current address limit so it can be restored afterwards. */
	mm_segment_t old_fs = get_fs();

	if (!copy_from_kernel_nofault_allowed(src, size))
		return -ERANGE;

	/*
	 * Widen the address limit so the user-copy helper accepts a kernel
	 * pointer, and disable pagefaults so a bad address fails fast
	 * instead of faulting in.
	 */
	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, (__force const void __user *)src,
			size);
	pagefault_enable();
	set_fs(old_fs);

	/* __copy_from_user_inatomic() returns the number of bytes NOT copied. */
	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
Ingo Molnarc33fa9f2008-04-17 20:05:36 +0200125
/**
 * copy_to_kernel_nofault(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
	long ret;
	/* Save the current address limit so it can be restored afterwards. */
	mm_segment_t old_fs = get_fs();

	/* Accept a kernel destination and make any fault abort the copy. */
	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
	pagefault_enable();
	set_fs(old_fs);

	/* Nonzero means some bytes were not written. */
	if (ret)
		return -EFAULT;
	return 0;
}
Alexei Starovoitovdbb7ee02015-08-31 08:57:10 -0700150
/**
 * strncpy_from_kernel_nofault: - Copy a NUL terminated string from unsafe
 * address.
 * @dst: Destination address, in kernel space.  This buffer must be at
 * least @count bytes long.
 * @unsafe_addr: Unsafe address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied and the
 * trailing NUL added). If @unsafe_addr is not a valid kernel address, return
 * -ERANGE.
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
	mm_segment_t old_fs = get_fs();
	const void *src = unsafe_addr;
	long ret;

	if (unlikely(count <= 0))
		return 0;
	if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
		return -ERANGE;

	set_fs(KERNEL_DS);
	pagefault_disable();

	/*
	 * dst is advanced even when __get_user() fails (the post-increment
	 * is evaluated regardless), so dst[-1] below is valid on all paths,
	 * including a fault on the very first byte.
	 */
	do {
		ret = __get_user(*dst++, (const char __user __force *)src++);
	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);

	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : src - unsafe_addr;
}
#endif /* HAVE_GET_KERNEL_NOFAULT */
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900195
196/**
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200197 * copy_from_user_nofault(): safely attempt to read from a user-space location
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700198 * @dst: pointer to the buffer that shall take the data
199 * @src: address to read from. This must be a user address.
200 * @size: size of the data chunk
201 *
202 * Safely read from user address @src to the buffer at @dst. If a kernel fault
203 * happens, handle that and return -EFAULT.
204 */
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200205long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700206{
207 long ret = -EFAULT;
Christoph Hellwig3d13f312020-08-11 18:33:47 -0700208 mm_segment_t old_fs = force_uaccess_begin();
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700209
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700210 if (access_ok(src, size)) {
211 pagefault_disable();
212 ret = __copy_from_user_inatomic(dst, src, size);
213 pagefault_enable();
214 }
Christoph Hellwig3d13f312020-08-11 18:33:47 -0700215 force_uaccess_end(old_fs);
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700216
217 if (ret)
218 return -EFAULT;
219 return 0;
220}
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200221EXPORT_SYMBOL_GPL(copy_from_user_nofault);
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700222
223/**
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200224 * copy_to_user_nofault(): safely attempt to write to a user-space location
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700225 * @dst: address to write to
226 * @src: pointer to the data that shall be written
227 * @size: size of the data chunk
228 *
229 * Safely write to address @dst from the buffer at @src. If a kernel fault
230 * happens, handle that and return -EFAULT.
231 */
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200232long copy_to_user_nofault(void __user *dst, const void *src, size_t size)
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700233{
234 long ret = -EFAULT;
Christoph Hellwig3d13f312020-08-11 18:33:47 -0700235 mm_segment_t old_fs = force_uaccess_begin();
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700236
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700237 if (access_ok(dst, size)) {
238 pagefault_disable();
239 ret = __copy_to_user_inatomic(dst, src, size);
240 pagefault_enable();
241 }
Christoph Hellwig3d13f312020-08-11 18:33:47 -0700242 force_uaccess_end(old_fs);
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700243
244 if (ret)
245 return -EFAULT;
246 return 0;
247}
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200248EXPORT_SYMBOL_GPL(copy_to_user_nofault);
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700249
/**
 * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
 * address.
 * @dst: Destination address, in kernel space.  This buffer must be at
 * least @count bytes long.
 * @unsafe_addr: Unsafe user address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe user address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count)
{
	mm_segment_t old_fs;
	long ret;

	if (unlikely(count <= 0))
		return 0;

	old_fs = force_uaccess_begin();
	pagefault_disable();
	ret = strncpy_from_user(dst, unsafe_addr, count);
	pagefault_enable();
	force_uaccess_end(old_fs);

	if (ret >= count) {
		/*
		 * Source longer than the buffer: strncpy_from_user() copied
		 * @count bytes without a NUL; truncate and report @count.
		 */
		ret = count;
		dst[ret - 1] = '\0';
	} else if (ret > 0) {
		/*
		 * strncpy_from_user() returns the length EXCLUDING the NUL;
		 * this function's contract includes it.
		 */
		ret++;
	}

	return ret;
}
292
293/**
Christoph Hellwig02dddb12020-06-08 21:34:20 -0700294 * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900295 * @unsafe_addr: The string to measure.
296 * @count: Maximum count (including NUL)
297 *
298 * Get the size of a NUL-terminated string in user space without pagefault.
299 *
300 * Returns the size of the string INCLUDING the terminating NUL.
301 *
302 * If the string is too long, returns a number larger than @count. User
303 * has to check the return value against "> count".
304 * On exception (or invalid count), returns 0.
305 *
306 * Unlike strnlen_user, this can be used from IRQ handler etc. because
307 * it disables pagefaults.
308 */
Christoph Hellwig02dddb12020-06-08 21:34:20 -0700309long strnlen_user_nofault(const void __user *unsafe_addr, long count)
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900310{
Christoph Hellwig3d13f312020-08-11 18:33:47 -0700311 mm_segment_t old_fs;
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900312 int ret;
313
Christoph Hellwig3d13f312020-08-11 18:33:47 -0700314 old_fs = force_uaccess_begin();
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900315 pagefault_disable();
316 ret = strnlen_user(unsafe_addr, count);
317 pagefault_enable();
Christoph Hellwig3d13f312020-08-11 18:33:47 -0700318 force_uaccess_end(old_fs);
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900319
320 return ret;
321}