// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel module for testing copy_to/from_user infrastructure.
 *
 * Copyright 2013 Google Inc. All Rights Reserved
 *
 * Authors:
 *      Kees Cook       <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mman.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

/*
 * Several 32-bit architectures support 64-bit {get,put}_user() calls.
 * As there doesn't appear to be anything that can safely determine
 * their capability at compile-time, we just have to opt-out certain archs.
 */
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
			    !defined(CONFIG_M68K) &&		\
			    !defined(CONFIG_MICROBLAZE) &&	\
			    !defined(CONFIG_NIOS2) &&		\
			    !defined(CONFIG_PPC32) &&		\
			    !defined(CONFIG_SUPERH))
# define TEST_U64
#endif

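/*
 * test() evaluates to non-zero (and logs a warning tagged with the source
 * line number) when @condition is true, i.e. when the check in question
 * has failed; callers OR the results together to accumulate failures.
 */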
#define test(condition, msg, ...)					\
({									\
	int cond = (condition);						\
	if (cond)							\
		pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__);	\
	cond;								\
})

static bool is_zeroed(void *from, size_t size)
{
	return memchr_inv(from, 0x0, size) == NULL;
}

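/*
 * Exercise check_zeroed_user(), which reports whether a range of userspace
 * memory is entirely zero: it returns 1 if so, 0 if any byte is non-zero,
 * and a negative error code on fault. Every result is cross-checked against
 * memchr_inv() run on the kernel-side copy of the same bytes.
 */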
static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
{
	int ret = 0;
	size_t start, end, i, zero_start, zero_end;

	if (test(size < 2 * PAGE_SIZE, "buffer too small"))
		return -EINVAL;

	/*
	 * We want to cross a page boundary to exercise the code more
	 * effectively. We also don't want to make the size we scan too large,
	 * otherwise the test can take a long time and cause soft lockups. So
	 * scan a 1024 byte region across the page boundary.
	 */
	size = 1024;
	start = PAGE_SIZE - (size / 2);

	kmem += start;
	umem += start;

	zero_start = size / 4;
	zero_end = size - zero_start;

	/*
	 * We conduct a series of check_zeroed_user() tests on a block of
	 * memory with the following byte-pattern (trying every possible
	 * [start,end] pair):
	 *
	 *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
	 *
	 * And we verify that check_zeroed_user() acts identically to
	 * memchr_inv().
	 */

	memset(kmem, 0x0, size);
	for (i = 1; i < zero_start; i += 2)
		kmem[i] = 0xff;
	for (i = zero_end; i < size; i += 2)
		kmem[i] = 0xff;

	ret |= test(copy_to_user(umem, kmem, size),
		    "legitimate copy_to_user failed");

	for (start = 0; start <= size; start++) {
		for (end = start; end <= size; end++) {
			size_t len = end - start;
			int retval = check_zeroed_user(umem + start, len);
			int expected = is_zeroed(kmem + start, len);

			ret |= test(retval != expected,
				    "check_zeroed_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
				    retval, expected, start, end);
		}
	}

	return ret;
}

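/*
 * Exercise copy_struct_from_user(), which copies a userspace struct of size
 * @usize into a kernel struct of size @ksize: the overlapping bytes are
 * copied, the kernel-side tail is zero-filled when usize < ksize, and
 * -E2BIG is returned when usize > ksize and the extra userspace bytes are
 * not all zero.
 */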
static int test_copy_struct_from_user(char *kmem, char __user *umem,
				      size_t size)
{
	int ret = 0;
	char *umem_src = NULL, *expected = NULL;
	size_t ksize, usize;

	umem_src = kmalloc(size, GFP_KERNEL);
	ret = test(umem_src == NULL, "kmalloc failed");
	if (ret)
		goto out_free;

	expected = kmalloc(size, GFP_KERNEL);
	ret = test(expected == NULL, "kmalloc failed");
	if (ret)
		goto out_free;

	/* Fill umem with a fixed byte pattern. */
	memset(umem_src, 0x3e, size);
	ret |= test(copy_to_user(umem, umem_src, size),
		    "legitimate copy_to_user failed");

	/* Check basic case -- (usize == ksize). */
	ksize = size;
	usize = size;

	memcpy(expected, umem_src, ksize);

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize == ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize == ksize) gives unexpected copy");

	/* Old userspace case -- (usize < ksize). */
	ksize = size;
	usize = size / 2;

	memcpy(expected, umem_src, usize);
	memset(expected + usize, 0x0, ksize - usize);

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize < ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize < ksize) gives unexpected copy");

	/* New userspace (-E2BIG) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
		    "copy_struct_from_user(usize > ksize) didn't give E2BIG");

	/* New userspace (success) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	memcpy(expected, umem_src, ksize);
	ret |= test(clear_user(umem + ksize, usize - ksize),
		    "legitimate clear_user failed");

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize > ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize > ksize) gives unexpected copy");

out_free:
	kfree(expected);
	kfree(umem_src);
	return ret;
}

static int __init test_user_copy_init(void)
{
	int ret = 0;
	char *kmem;
	char __user *usermem;
	char *bad_usermem;
	unsigned long user_addr;
	u8 val_u8;
	u16 val_u16;
	u32 val_u32;
#ifdef TEST_U64
	u64 val_u64;
#endif

	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!kmem)
		return -ENOMEM;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
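	/*
	 * vm_mmap() encodes failure as a negative errno cast to an address,
	 * which always compares >= TASK_SIZE, so this check also catches
	 * allocation errors.
	 */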
	if (user_addr >= (unsigned long)(TASK_SIZE)) {
		pr_warn("Failed to allocate user memory\n");
		kfree(kmem);
		return -ENOMEM;
	}

	usermem = (char __user *)user_addr;
	bad_usermem = (char *)user_addr;
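	/*
	 * usermem carries the proper __user annotation for the mapping;
	 * bad_usermem aliases the same user address with the annotation
	 * dropped, so it can be passed where a kernel pointer is expected
	 * in the "illegal usage" tests below.
	 */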

	/*
	 * Legitimate usage: none of these copies should fail.
	 */
	memset(kmem, 0x3a, PAGE_SIZE * 2);
	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
		    "legitimate copy_to_user failed");
	memset(kmem, 0x0, PAGE_SIZE);
	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
		    "legitimate copy_from_user failed");
	ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
		    "legitimate usercopy failed to copy data");

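	/*
	 * Round-trip a known value through put_user()/get_user() at each
	 * supported access width.
	 */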
#define test_legit(size, check)						   \
	do {								   \
		val_##size = check;					   \
		ret |= test(put_user(val_##size, (size __user *)usermem), \
		    "legitimate put_user (" #size ") failed");		   \
		val_##size = 0;						   \
		ret |= test(get_user(val_##size, (size __user *)usermem), \
		    "legitimate get_user (" #size ") failed");		   \
		ret |= test(val_##size != check,			   \
		    "legitimate get_user (" #size ") failed to do copy");  \
		if (val_##size != check) {				   \
			pr_info("0x%llx != 0x%llx\n",			   \
				(unsigned long long)val_##size,		   \
				(unsigned long long)check);		   \
		}							   \
	} while (0)

	test_legit(u8, 0x5a);
	test_legit(u16, 0x5a5b);
	test_legit(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_legit(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_legit

	/* Test usage of check_zeroed_user(). */
	ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
	/* Test usage of copy_struct_from_user(). */
	ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);

	/*
	 * Invalid usage: none of these copies should succeed.
	 */

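	/*
	 * Each of these calls passes a kernel address where a __user pointer
	 * is expected (or vice versa); the uaccess routines are expected to
	 * refuse the access and, for copy_from_user()/get_user(), to zero
	 * the destination.
	 */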
	/* Prepare kernel memory with check values. */
	memset(kmem, 0x5a, PAGE_SIZE);
	memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);

	/* Reject kernel-to-kernel copies through copy_from_user(). */
	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
				    PAGE_SIZE),
		    "illegal all-kernel copy_from_user passed");

	/* Destination half of buffer should have been zeroed. */
	ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
		    "zeroing failure for illegal all-kernel copy_from_user");

#if 0
	/*
	 * When running with SMAP/PAN/etc, this will Oops the kernel
	 * due to the zeroing of userspace memory on failure. This needs
	 * to be tested in LKDTM instead, since this test module does not
	 * expect to explode.
	 */
	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
				    PAGE_SIZE),
		    "illegal reversed copy_from_user passed");
#endif
	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
				  PAGE_SIZE),
		    "illegal all-kernel copy_to_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
				  PAGE_SIZE),
		    "illegal reversed copy_to_user passed");

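	/*
	 * On a faulting get_user() the destination variable must be zeroed,
	 * so both the error return and the zeroed value are checked below.
	 */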
#define test_illegal(size, check)					    \
	do {								    \
		val_##size = (check);					    \
		ret |= test(!get_user(val_##size, (size __user *)kmem),	    \
		    "illegal get_user (" #size ") passed");		    \
		ret |= test(val_##size != (size)0,			    \
		    "zeroing failure for illegal get_user (" #size ")");    \
		if (val_##size != (size)0) {				    \
			pr_info("0x%llx != 0\n",			    \
				(unsigned long long)val_##size);	    \
		}							    \
		ret |= test(!put_user(val_##size, (size __user *)kmem),	    \
		    "illegal put_user (" #size ") passed");		    \
	} while (0)

	test_illegal(u8, 0x5a);
	test_illegal(u16, 0x5a5b);
	test_illegal(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_illegal

	vm_munmap(user_addr, PAGE_SIZE * 2);
	kfree(kmem);

	if (ret == 0) {
		pr_info("tests passed.\n");
		return 0;
	}

	return -EINVAL;
}

module_init(test_user_copy_init);

static void __exit test_user_copy_exit(void)
{
	pr_info("unloaded.\n");
}

module_exit(test_user_copy_exit);

MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");