// SPDX-License-Identifier: GPL-2.0
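/*
 * Fill pages with PAGE_POISON when they are freed and verify the
 * pattern again before they are handed back out, so that stray
 * writes to freed memory are detected and reported.
 */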
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

bool _page_poisoning_enabled_early;
EXPORT_SYMBOL(_page_poisoning_enabled_early);
DEFINE_STATIC_KEY_FALSE(_page_poisoning_enabled);
EXPORT_SYMBOL(_page_poisoning_enabled);

static int __init early_page_poison_param(char *buf)
{
	return kstrtobool(buf, &_page_poisoning_enabled_early);
}
early_param("page_poison", early_page_poison_param);

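/*
 * Fill a single page with the poison pattern. kmap_atomic() is used
 * because the page may be a highmem page with no permanent kernel
 * mapping.
 */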
static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still thinks the page is in-use, so skip it. */
	kasan_disable_current();
	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

void __kernel_poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

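/*
 * True if a and b differ in exactly one bit: the XOR then has exactly
 * one bit set, which is checked with the power-of-two test
 * !(error & (error - 1)).
 */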
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

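/*
 * Scan a range for bytes that deviate from PAGE_POISON. If any are
 * found, report (rate-limited) either a single bit error or general
 * memory corruption, together with a hex dump of the affected bytes
 * and a stack trace.
 */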
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}

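/*
 * Verify that a page still holds the poison pattern before the
 * allocator hands it out again.
 */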
static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	/*
	 * When page poisoning is enabled, every page freed to the buddy
	 * allocator is poisoned, so there is no need to check here
	 * whether this page was actually poisoned.
	 */
	check_poison_mem(addr, PAGE_SIZE);
	kunmap_atomic(addr);
}

void __kernel_unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

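/*
 * On architectures that cannot map/unmap pages for debug_pagealloc,
 * the poisoning above does all the work, so this hook is a no-op.
 */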
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing; all work is done via poison pages. */
}
#endif