blob: 7413dd300516e5405d92858a033b36c0eeb0399c [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Aleksa Saraif5a1a532019-10-01 11:10:52 +10002#include <linux/bitops.h>
Albert van der Linde4d0e9df2020-10-15 20:13:50 -07003#include <linux/fault-inject-usercopy.h>
Marco Elver76d6f062020-01-21 17:05:12 +01004#include <linux/instrumented.h>
5#include <linux/uaccess.h>
Al Virod5975802017-03-20 21:56:06 -04006
7/* out-of-line parts */
8
9#ifndef INLINE_COPY_FROM_USER
10unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
11{
12 unsigned long res = n;
Al Viro9c5f6902017-06-29 21:39:54 -040013 might_fault();
Albert van der Linde4d0e9df2020-10-15 20:13:50 -070014 if (!should_fail_usercopy() && likely(access_ok(from, n))) {
Marco Elver76d6f062020-01-21 17:05:12 +010015 instrument_copy_from_user(to, from, n);
Al Virod5975802017-03-20 21:56:06 -040016 res = raw_copy_from_user(to, from, n);
Al Viro9c5f6902017-06-29 21:39:54 -040017 }
Al Virod5975802017-03-20 21:56:06 -040018 if (unlikely(res))
19 memset(to + (n - res), 0, res);
20 return res;
21}
22EXPORT_SYMBOL(_copy_from_user);
23#endif
24
25#ifndef INLINE_COPY_TO_USER
Christophe Leroya0e94592017-12-09 17:24:24 +010026unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
Al Virod5975802017-03-20 21:56:06 -040027{
Al Viro9c5f6902017-06-29 21:39:54 -040028 might_fault();
Albert van der Linde4d0e9df2020-10-15 20:13:50 -070029 if (should_fail_usercopy())
30 return n;
Linus Torvalds96d4f262019-01-03 18:57:57 -080031 if (likely(access_ok(to, n))) {
Marco Elver76d6f062020-01-21 17:05:12 +010032 instrument_copy_to_user(to, from, n);
Al Virod5975802017-03-20 21:56:06 -040033 n = raw_copy_to_user(to, from, n);
Al Viro9c5f6902017-06-29 21:39:54 -040034 }
Al Virod5975802017-03-20 21:56:06 -040035 return n;
36}
37EXPORT_SYMBOL(_copy_to_user);
38#endif
Aleksa Saraif5a1a532019-10-01 11:10:52 +100039
/**
 * check_zeroed_user: check if a userspace buffer only contains zero bytes
 * @from: Source address, in userspace.
 * @size: Size of buffer.
 *
 * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
 * userspace addresses (and is more efficient because we don't care where the
 * first non-zero byte is).
 *
 * Returns:
 *  * 0: There were non-zero bytes present in the buffer.
 *  * 1: The buffer was full of zero bytes.
 *  * -EFAULT: access to userspace failed.
 */
int check_zeroed_user(const void __user *from, size_t size)
{
	unsigned long val;
	/* Byte offset of @from within its naturally-aligned word. */
	uintptr_t align = (uintptr_t) from % sizeof(unsigned long);

	/* Empty buffer is trivially all-zero. */
	if (unlikely(size == 0))
		return 1;

	/*
	 * Round @from down to a word boundary and widen @size to cover the
	 * same end address, so every read below is an aligned word load.
	 */
	from -= align;
	size += align;

	if (!user_read_access_begin(from, size))
		return -EFAULT;

	unsafe_get_user(val, (unsigned long __user *) from, err_fault);
	if (align)
		/* Discard the leading bytes before the real buffer start. */
		val &= ~aligned_byte_mask(align);

	/* Walk full words; bail out as soon as a non-zero word is seen. */
	while (size > sizeof(unsigned long)) {
		if (unlikely(val))
			goto done;

		from += sizeof(unsigned long);
		size -= sizeof(unsigned long);

		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
	}

	/* Final word may extend past the buffer end: mask off the excess. */
	if (size < sizeof(unsigned long))
		val &= aligned_byte_mask(size);

done:
	user_read_access_end();
	return (val == 0);
err_fault:
	/* unsafe_get_user() jumps here on a userspace fault. */
	user_read_access_end();
	return -EFAULT;
}
EXPORT_SYMBOL(check_zeroed_user);