Thomas Gleixner1439f942019-05-29 07:12:37 -07001// SPDX-License-Identifier: GPL-2.0-only
Andi Kleen6a460792009-09-16 11:50:15 +02002/*
3 * Copyright (C) 2008, 2009 Intel Corporation
4 * Authors: Andi Kleen, Fengguang Wu
5 *
Andi Kleen6a460792009-09-16 11:50:15 +02006 * High level machine check handler. Handles pages reported by the
Andi Kleen1c80b992010-09-27 23:09:51 +02007 * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
Andi Kleen6a460792009-09-16 11:50:15 +02008 * failure.
Andi Kleen1c80b992010-09-27 23:09:51 +02009 *
10 * In addition there is a "soft offline" entry point that allows taking
11 * suspicious, not-yet-corrupted pages out of use without killing anything.
Andi Kleen6a460792009-09-16 11:50:15 +020012 *
13 * Handles page cache pages in various states. The tricky part
Andi Kleen1c80b992010-09-27 23:09:51 +020014 * here is that we can access any page asynchronously with respect to
15 * other VM users, because memory failures could happen anytime and
16 * anywhere. This could violate some of their assumptions. This is why
17 * this code has to be extremely careful. Generally it tries to use
18 * normal locking rules, i.e. take the standard locks, even if that means
19 * the error handling potentially takes a long time.
Andi Kleene0de78df2015-06-24 16:56:02 -070020 *
21 * It can be very tempting to add handling for obscure cases here.
22 * In general any code for handling new cases should only be added iff:
23 * - You know how to test it.
24 * - You have a test that can be added to mce-test
25 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
26 * - The case actually shows up as a frequent (top 10) page state in
27 * tools/vm/page-types when running a real workload.
Andi Kleen1c80b992010-09-27 23:09:51 +020028 *
29 * There are several operations here with exponential complexity because
30 * of unsuitable VM data structures. For example the operation to map back
31 * from RMAP chains to processes has to walk the complete process list and
32 * has non-linear complexity in the number of processes. But since memory
33 * corruptions are rare we hope to get away with this. This avoids impacting the core
34 * VM.
Andi Kleen6a460792009-09-16 11:50:15 +020035 */
Andi Kleen6a460792009-09-16 11:50:15 +020036#include <linux/kernel.h>
37#include <linux/mm.h>
38#include <linux/page-flags.h>
Wu Fengguang478c5ff2009-12-16 12:19:59 +010039#include <linux/kernel-page-flags.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +010040#include <linux/sched/signal.h>
Ingo Molnar29930022017-02-08 18:51:36 +010041#include <linux/sched/task.h>
Hugh Dickins01e00f82009-10-13 15:02:11 +010042#include <linux/ksm.h>
Andi Kleen6a460792009-09-16 11:50:15 +020043#include <linux/rmap.h>
Paul Gortmakerb9e15ba2011-05-26 16:00:52 -040044#include <linux/export.h>
Andi Kleen6a460792009-09-16 11:50:15 +020045#include <linux/pagemap.h>
46#include <linux/swap.h>
47#include <linux/backing-dev.h>
Andi Kleenfacb6012009-12-16 12:20:00 +010048#include <linux/migrate.h>
Andi Kleenfacb6012009-12-16 12:20:00 +010049#include <linux/suspend.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090050#include <linux/slab.h>
Huang Yingbf998152010-05-31 14:28:19 +080051#include <linux/swapops.h>
Naoya Horiguchi7af446a2010-05-28 09:29:17 +090052#include <linux/hugetlb.h>
KOSAKI Motohiro20d6c962010-12-02 14:31:19 -080053#include <linux/memory_hotplug.h>
Minchan Kim5db8a732011-06-15 15:08:48 -070054#include <linux/mm_inline.h>
Dan Williams6100e342018-07-13 21:50:21 -070055#include <linux/memremap.h>
Huang Yingea8f5fb2011-07-13 13:14:27 +080056#include <linux/kfifo.h>
Naoya Horiguchia5f65102015-11-05 18:47:26 -080057#include <linux/ratelimit.h>
Naoya Horiguchid4ae9912018-08-23 17:00:42 -070058#include <linux/page-isolation.h>
Andi Kleen6a460792009-09-16 11:50:15 +020059#include "internal.h"
Xie XiuQi97f0b132015-06-24 16:57:36 -070060#include "ras/ras_event.h"
Andi Kleen6a460792009-09-16 11:50:15 +020061
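/*
 * Tunables exposed via /proc/sys/vm/: memory_failure_early_kill selects
 * whether processes mapping a corrupted page are killed as soon as the
 * error is detected (1) or only when they actually touch the bad data (0);
 * memory_failure_recovery enables recovery at all (0 means panic on every
 * uncorrected memory error).
 */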
62int sysctl_memory_failure_early_kill __read_mostly = 0;
63
64int sysctl_memory_failure_recovery __read_mostly = 1;
65
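/* Total pages poisoned since boot, shown as "HardwareCorrupted" in /proc/meminfo. */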
Xishi Qiu293c07e2013-02-22 16:34:02 -080066atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
Andi Kleen6a460792009-09-16 11:50:15 +020067
Andi Kleen27df5062009-12-21 19:56:42 +010068#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69
Haicheng Li1bfe5fe2009-12-16 12:19:59 +010070u32 hwpoison_filter_enable = 0;
Wu Fengguang7c116f22009-12-16 12:19:59 +010071u32 hwpoison_filter_dev_major = ~0U;
72u32 hwpoison_filter_dev_minor = ~0U;
Wu Fengguang478c5ff2009-12-16 12:19:59 +010073u64 hwpoison_filter_flags_mask;
74u64 hwpoison_filter_flags_value;
Haicheng Li1bfe5fe2009-12-16 12:19:59 +010075EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
Wu Fengguang7c116f22009-12-16 12:19:59 +010076EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
77EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
Wu Fengguang478c5ff2009-12-16 12:19:59 +010078EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
79EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
Wu Fengguang7c116f22009-12-16 12:19:59 +010080
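/*
 * Filter by block device: ignore the error unless the page's backing inode
 * lives on the device selected via the hwpoison_filter_dev_major/minor
 * debugfs attributes (~0U means "do not filter on this field").
 */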
81static int hwpoison_filter_dev(struct page *p)
82{
83 struct address_space *mapping;
84 dev_t dev;
85
86 if (hwpoison_filter_dev_major == ~0U &&
87 hwpoison_filter_dev_minor == ~0U)
88 return 0;
89
90 /*
Andi Kleen1c80b992010-09-27 23:09:51 +020091 * page_mapping() does not accept slab pages.
Wu Fengguang7c116f22009-12-16 12:19:59 +010092 */
93 if (PageSlab(p))
94 return -EINVAL;
95
96 mapping = page_mapping(p);
97 if (mapping == NULL || mapping->host == NULL)
98 return -EINVAL;
99
100 dev = mapping->host->i_sb->s_dev;
101 if (hwpoison_filter_dev_major != ~0U &&
102 hwpoison_filter_dev_major != MAJOR(dev))
103 return -EINVAL;
104 if (hwpoison_filter_dev_minor != ~0U &&
105 hwpoison_filter_dev_minor != MINOR(dev))
106 return -EINVAL;
107
108 return 0;
109}
110
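/*
 * Filter by page flags: ignore the error unless the page's stable flags,
 * masked by hwpoison_filter_flags_mask, equal hwpoison_filter_flags_value.
 */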
Wu Fengguang478c5ff2009-12-16 12:19:59 +0100111static int hwpoison_filter_flags(struct page *p)
112{
113 if (!hwpoison_filter_flags_mask)
114 return 0;
115
116 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
117 hwpoison_filter_flags_value)
118 return 0;
119 else
120 return -EINVAL;
121}
122
Andi Kleen4fd466e2009-12-16 12:19:59 +0100123/*
124 * This allows stress tests to limit test scope to a collection of tasks
125 * by putting them under some memcg. This prevents killing unrelated/important
126 * processes such as /sbin/init. Note that the target task may share clean
127 * pages with init (eg. libc text), which is harmless. If the target task
128 * shares _dirty_ pages with another task B, the test scheme must make sure B
129 * is also included in the memcg. Finally, due to race conditions this filter
130 * can only guarantee that the page either belongs to the memcg tasks, or is
131 * a freed page.
132 */
Vladimir Davydov94a59fb2015-09-09 15:35:31 -0700133#ifdef CONFIG_MEMCG
Andi Kleen4fd466e2009-12-16 12:19:59 +0100134u64 hwpoison_filter_memcg;
135EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
136static int hwpoison_filter_task(struct page *p)
137{
Andi Kleen4fd466e2009-12-16 12:19:59 +0100138 if (!hwpoison_filter_memcg)
139 return 0;
140
Vladimir Davydov94a59fb2015-09-09 15:35:31 -0700141 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
Andi Kleen4fd466e2009-12-16 12:19:59 +0100142 return -EINVAL;
143
144 return 0;
145}
146#else
147static int hwpoison_filter_task(struct page *p) { return 0; }
148#endif
149
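/*
 * Main filter entry point: returns 0 if the error on @p should be handled,
 * -EINVAL if it is screened out by one of the debugfs filters above.
 */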
Wu Fengguang7c116f22009-12-16 12:19:59 +0100150int hwpoison_filter(struct page *p)
151{
Haicheng Li1bfe5fe2009-12-16 12:19:59 +0100152 if (!hwpoison_filter_enable)
153 return 0;
154
Wu Fengguang7c116f22009-12-16 12:19:59 +0100155 if (hwpoison_filter_dev(p))
156 return -EINVAL;
157
Wu Fengguang478c5ff2009-12-16 12:19:59 +0100158 if (hwpoison_filter_flags(p))
159 return -EINVAL;
160
Andi Kleen4fd466e2009-12-16 12:19:59 +0100161 if (hwpoison_filter_task(p))
162 return -EINVAL;
163
Wu Fengguang7c116f22009-12-16 12:19:59 +0100164 return 0;
165}
Andi Kleen27df5062009-12-21 19:56:42 +0100166#else
167int hwpoison_filter(struct page *p)
168{
169 return 0;
170}
171#endif
172
Wu Fengguang7c116f22009-12-16 12:19:59 +0100173EXPORT_SYMBOL_GPL(hwpoison_filter);
174
Andi Kleen6a460792009-09-16 11:50:15 +0200175/*
Dan Williamsae1139e2018-07-13 21:50:11 -0700176 * Kill all processes that have a poisoned page mapped and then isolate
177 * the page.
178 *
179 * General strategy:
180 * Find all processes having the page mapped and kill them.
181 * But we keep a page reference around so that the page is not
182 * actually freed yet.
183 * Then stash the page away
184 *
185 * There's no convenient way to get back to mapped processes
186 * from the VMAs. So do a brute-force search over all
187 * running processes.
188 *
189 * Remember that machine checks are not common (or rather
190 * if they are common you have other problems), so this shouldn't
191 * be a performance issue.
192 *
193 * Also there are some races possible between detecting the
194 * error and actually handling it.
195 */
196
197struct to_kill {
198 struct list_head nd;
199 struct task_struct *tsk;
200 unsigned long addr;
201 short size_shift;
Dan Williamsae1139e2018-07-13 21:50:11 -0700202};
203
204/*
Tony Luck7329bbe2011-12-13 09:27:58 -0800205 * Send a SIGBUS signal to all the processes that have the page mapped:
206 * "action optional" if they are not immediately affected by the error,
207 * "action required" if the error happened in the current execution context.
Andi Kleen6a460792009-09-16 11:50:15 +0200208 */
Dan Williamsae1139e2018-07-13 21:50:11 -0700209static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
Andi Kleen6a460792009-09-16 11:50:15 +0200210{
Dan Williamsae1139e2018-07-13 21:50:11 -0700211 struct task_struct *t = tk->tsk;
212 short addr_lsb = tk->size_shift;
Wetp Zhang872e9a22020-06-01 21:50:11 -0700213 int ret = 0;
Andi Kleen6a460792009-09-16 11:50:15 +0200214
Naoya Horiguchi03151c62020-06-11 17:34:48 -0700215 pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
Wetp Zhang872e9a22020-06-01 21:50:11 -0700216 pfn, t->comm, t->pid);
Tony Luck7329bbe2011-12-13 09:27:58 -0800217
Wetp Zhang872e9a22020-06-01 21:50:11 -0700218 if (flags & MF_ACTION_REQUIRED) {
Naoya Horiguchi03151c62020-06-11 17:34:48 -0700219 WARN_ON_ONCE(t != current);
220 ret = force_sig_mceerr(BUS_MCEERR_AR,
Wetp Zhang872e9a22020-06-01 21:50:11 -0700221 (void __user *)tk->addr, addr_lsb);
Tony Luck7329bbe2011-12-13 09:27:58 -0800222 } else {
223 /*
224 * Don't use force here, it's convenient if the signal
225 * can be temporarily blocked.
226 * This could cause a loop when the user sets SIGBUS
227 * to SIG_IGN, but hopefully no one will do that?
228 */
Dan Williamsae1139e2018-07-13 21:50:11 -0700229 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
Eric W. Biedermanc0f45552017-08-02 13:51:22 -0500230 addr_lsb, t); /* synchronous? */
Tony Luck7329bbe2011-12-13 09:27:58 -0800231 }
Andi Kleen6a460792009-09-16 11:50:15 +0200232 if (ret < 0)
Chen Yucong495367c02016-05-20 16:57:32 -0700233 pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
Joe Perches11705322016-03-17 14:19:50 -0700234 t->comm, t->pid, ret);
Andi Kleen6a460792009-09-16 11:50:15 +0200235 return ret;
236}
237
238/*
Andi Kleen588f9ce2009-12-16 12:19:57 +0100239 * When an unknown page type is encountered, drain as many buffers as possible
240 * in the hope of turning the page into an LRU or free page, which we can handle.
241 */
Andi Kleenfacb6012009-12-16 12:20:00 +0100242void shake_page(struct page *p, int access)
Andi Kleen588f9ce2009-12-16 12:19:57 +0100243{
Naoya Horiguchi8bcb74d2017-05-03 14:56:19 -0700244 if (PageHuge(p))
245 return;
246
Andi Kleen588f9ce2009-12-16 12:19:57 +0100247 if (!PageSlab(p)) {
248 lru_add_drain_all();
249 if (PageLRU(p))
250 return;
Vlastimil Babkac0554322014-12-10 15:43:10 -0800251 drain_all_pages(page_zone(p));
Andi Kleen588f9ce2009-12-16 12:19:57 +0100252 if (PageLRU(p) || is_free_buddy_page(p))
253 return;
254 }
Andi Kleenfacb6012009-12-16 12:20:00 +0100255
Andi Kleen588f9ce2009-12-16 12:19:57 +0100256 /*
Johannes Weiner6b4f7792014-12-12 16:56:13 -0800257 * Only call drop_slab_node() here (which would also shrink
258 * other caches) if access is not potentially fatal.
Andi Kleen588f9ce2009-12-16 12:19:57 +0100259 */
Vladimir Davydovcb731d62015-02-12 14:58:54 -0800260 if (access)
261 drop_slab_node(page_to_nid(p));
Andi Kleen588f9ce2009-12-16 12:19:57 +0100262}
263EXPORT_SYMBOL_GPL(shake_page);
264
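/*
 * Walk the page tables to determine at which level a ZONE_DEVICE page is
 * mapped in @vma: returns PUD_SHIFT, PMD_SHIFT or PAGE_SHIFT for a devmap
 * mapping of that size, or 0 if the page is not mapped there.
 */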
Dan Williams6100e342018-07-13 21:50:21 -0700265static unsigned long dev_pagemap_mapping_shift(struct page *page,
266 struct vm_area_struct *vma)
267{
268 unsigned long address = vma_address(page, vma);
269 pgd_t *pgd;
270 p4d_t *p4d;
271 pud_t *pud;
272 pmd_t *pmd;
273 pte_t *pte;
Andi Kleen6a460792009-09-16 11:50:15 +0200274
Dan Williams6100e342018-07-13 21:50:21 -0700275 pgd = pgd_offset(vma->vm_mm, address);
276 if (!pgd_present(*pgd))
277 return 0;
278 p4d = p4d_offset(pgd, address);
279 if (!p4d_present(*p4d))
280 return 0;
281 pud = pud_offset(p4d, address);
282 if (!pud_present(*pud))
283 return 0;
284 if (pud_devmap(*pud))
285 return PUD_SHIFT;
286 pmd = pmd_offset(pud, address);
287 if (!pmd_present(*pmd))
288 return 0;
289 if (pmd_devmap(*pmd))
290 return PMD_SHIFT;
291 pte = pte_offset_map(pmd, address);
292 if (!pte_present(*pte))
293 return 0;
294 if (pte_devmap(*pte))
295 return PAGE_SHIFT;
296 return 0;
297}
Andi Kleen6a460792009-09-16 11:50:15 +0200298
299/*
300 * Failure handling: if we can't find or can't kill a process there's
301 * not much we can do. We just print a message and ignore otherwise.
302 */
303
304/*
305 * Schedule a process for later kill.
306 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
Andi Kleen6a460792009-09-16 11:50:15 +0200307 */
308static void add_to_kill(struct task_struct *tsk, struct page *p,
309 struct vm_area_struct *vma,
Jane Chu996ff7a2019-11-30 17:53:35 -0800310 struct list_head *to_kill)
Andi Kleen6a460792009-09-16 11:50:15 +0200311{
312 struct to_kill *tk;
313
Jane Chu996ff7a2019-11-30 17:53:35 -0800314 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
315 if (!tk) {
316 pr_err("Memory failure: Out of memory while machine check handling\n");
317 return;
Andi Kleen6a460792009-09-16 11:50:15 +0200318 }
Jane Chu996ff7a2019-11-30 17:53:35 -0800319
Andi Kleen6a460792009-09-16 11:50:15 +0200320 tk->addr = page_address_in_vma(p, vma);
Dan Williams6100e342018-07-13 21:50:21 -0700321 if (is_zone_device_page(p))
322 tk->size_shift = dev_pagemap_mapping_shift(p, vma);
323 else
Yunfeng Ye75068512019-11-30 17:53:41 -0800324 tk->size_shift = page_shift(compound_head(p));
Andi Kleen6a460792009-09-16 11:50:15 +0200325
326 /*
Jane Chu3d7fed42019-10-14 14:12:29 -0700327 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
328 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
329 * "tk->size_shift == 0" effectively checks for no mapping on
330 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
331 * to a process' address space, it's possible not all N VMAs
332 * contain mappings for the page, but at least one VMA does.
333 * Only deliver SIGBUS with payload derived from the VMA that
334 * has a mapping for the page.
Andi Kleen6a460792009-09-16 11:50:15 +0200335 */
Jane Chu3d7fed42019-10-14 14:12:29 -0700336 if (tk->addr == -EFAULT) {
Chen Yucong495367c02016-05-20 16:57:32 -0700337 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
Andi Kleen6a460792009-09-16 11:50:15 +0200338 page_to_pfn(p), tsk->comm);
Jane Chu3d7fed42019-10-14 14:12:29 -0700339 } else if (tk->size_shift == 0) {
340 kfree(tk);
341 return;
Andi Kleen6a460792009-09-16 11:50:15 +0200342 }
Jane Chu996ff7a2019-11-30 17:53:35 -0800343
Andi Kleen6a460792009-09-16 11:50:15 +0200344 get_task_struct(tsk);
345 tk->tsk = tsk;
346 list_add_tail(&tk->nd, to_kill);
347}
348
349/*
350 * Kill the processes that have been collected earlier.
351 *
352 * Only do anything when FORCEKILL is set, otherwise just free the list
353 * (this is used for clean pages which do not need killing).
354 * Also when FAIL is set do a force kill because something went
355 * wrong earlier.
356 */
Dan Williamsae1139e2018-07-13 21:50:11 -0700357static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
358 unsigned long pfn, int flags)
Andi Kleen6a460792009-09-16 11:50:15 +0200359{
360 struct to_kill *tk, *next;
361
362 list_for_each_entry_safe (tk, next, to_kill, nd) {
Tony Luck6751ed62012-07-11 10:20:47 -0700363 if (forcekill) {
Andi Kleen6a460792009-09-16 11:50:15 +0200364 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -0200365 * In case something went wrong with munmapping
Andi Kleen6a460792009-09-16 11:50:15 +0200366 * make sure the process doesn't catch the
367 * signal and then access the memory. Just kill it.
Andi Kleen6a460792009-09-16 11:50:15 +0200368 */
Jane Chu3d7fed42019-10-14 14:12:29 -0700369 if (fail || tk->addr == -EFAULT) {
Chen Yucong495367c02016-05-20 16:57:32 -0700370 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
Joe Perches11705322016-03-17 14:19:50 -0700371 pfn, tk->tsk->comm, tk->tsk->pid);
Naoya Horiguchi63763602019-02-01 14:21:08 -0800372 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
373 tk->tsk, PIDTYPE_PID);
Andi Kleen6a460792009-09-16 11:50:15 +0200374 }
375
376 /*
377 * In theory the process could have mapped
378 * something else on the address in-between. We could
379 * check for that, but we need to tell the
380 * process anyway.
381 */
Dan Williamsae1139e2018-07-13 21:50:11 -0700382 else if (kill_proc(tk, pfn, flags) < 0)
Chen Yucong495367c02016-05-20 16:57:32 -0700383 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
Joe Perches11705322016-03-17 14:19:50 -0700384 pfn, tk->tsk->comm, tk->tsk->pid);
Andi Kleen6a460792009-09-16 11:50:15 +0200385 }
386 put_task_struct(tk->tsk);
387 kfree(tk);
388 }
389}
390
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700391/*
392 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
393 * on behalf of the thread group. Return task_struct of the (first found)
394 * dedicated thread if found, and return NULL otherwise.
395 *
396 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
397 * have to call rcu_read_lock/unlock() in this function.
398 */
399static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
Andi Kleen6a460792009-09-16 11:50:15 +0200400{
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700401 struct task_struct *t;
402
Naoya Horiguchi4e018b42020-06-11 17:34:45 -0700403 for_each_thread(tsk, t) {
404 if (t->flags & PF_MCE_PROCESS) {
405 if (t->flags & PF_MCE_EARLY)
406 return t;
407 } else {
408 if (sysctl_memory_failure_early_kill)
409 return t;
410 }
411 }
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700412 return NULL;
413}
414
415/*
416 * Determine whether a given process is an "early kill" process which expects
417 * to be signaled when some page under the process is hwpoisoned.
418 * Return task_struct of the dedicated thread (main thread unless explicitly
419 * specified) if the process is "early kill," and otherwise returns NULL.
Naoya Horiguchi03151c62020-06-11 17:34:48 -0700420 *
421 * Note that the above is true for the Action Optional case, but not for the
422 * Action Required case, where SIGBUS should be sent only to the current thread.
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700423 */
424static struct task_struct *task_early_kill(struct task_struct *tsk,
425 int force_early)
426{
Andi Kleen6a460792009-09-16 11:50:15 +0200427 if (!tsk->mm)
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700428 return NULL;
Naoya Horiguchi03151c62020-06-11 17:34:48 -0700429 if (force_early) {
430 /*
431 * Comparing ->mm here because current task might represent
432 * a subthread, while tsk always points to the main thread.
433 */
434 if (tsk->mm == current->mm)
435 return current;
436 else
437 return NULL;
438 }
Naoya Horiguchi4e018b42020-06-11 17:34:45 -0700439 return find_early_kill_thread(tsk);
Andi Kleen6a460792009-09-16 11:50:15 +0200440}
441
442/*
443 * Collect processes when the error hit an anonymous page.
444 */
445static void collect_procs_anon(struct page *page, struct list_head *to_kill,
Jane Chu996ff7a2019-11-30 17:53:35 -0800446 int force_early)
Andi Kleen6a460792009-09-16 11:50:15 +0200447{
448 struct vm_area_struct *vma;
449 struct task_struct *tsk;
450 struct anon_vma *av;
Michel Lespinassebf181b92012-10-08 16:31:39 -0700451 pgoff_t pgoff;
Andi Kleen6a460792009-09-16 11:50:15 +0200452
Ingo Molnar4fc3f1d2012-12-02 19:56:50 +0000453 av = page_lock_anon_vma_read(page);
Andi Kleen6a460792009-09-16 11:50:15 +0200454 if (av == NULL) /* Not actually mapped anymore */
Peter Zijlstra9b679322011-06-27 16:18:09 -0700455 return;
456
Naoya Horiguchia0f7a752014-07-23 14:00:01 -0700457 pgoff = page_to_pgoff(page);
Peter Zijlstra9b679322011-06-27 16:18:09 -0700458 read_lock(&tasklist_lock);
Andi Kleen6a460792009-09-16 11:50:15 +0200459 for_each_process (tsk) {
Rik van Riel5beb4932010-03-05 13:42:07 -0800460 struct anon_vma_chain *vmac;
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700461 struct task_struct *t = task_early_kill(tsk, force_early);
Rik van Riel5beb4932010-03-05 13:42:07 -0800462
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700463 if (!t)
Andi Kleen6a460792009-09-16 11:50:15 +0200464 continue;
Michel Lespinassebf181b92012-10-08 16:31:39 -0700465 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
466 pgoff, pgoff) {
Rik van Riel5beb4932010-03-05 13:42:07 -0800467 vma = vmac->vma;
Andi Kleen6a460792009-09-16 11:50:15 +0200468 if (!page_mapped_in_vma(page, vma))
469 continue;
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700470 if (vma->vm_mm == t->mm)
Jane Chu996ff7a2019-11-30 17:53:35 -0800471 add_to_kill(t, page, vma, to_kill);
Andi Kleen6a460792009-09-16 11:50:15 +0200472 }
473 }
Andi Kleen6a460792009-09-16 11:50:15 +0200474 read_unlock(&tasklist_lock);
Ingo Molnar4fc3f1d2012-12-02 19:56:50 +0000475 page_unlock_anon_vma_read(av);
Andi Kleen6a460792009-09-16 11:50:15 +0200476}
477
478/*
479 * Collect processes when the error hit a file mapped page.
480 */
481static void collect_procs_file(struct page *page, struct list_head *to_kill,
Jane Chu996ff7a2019-11-30 17:53:35 -0800482 int force_early)
Andi Kleen6a460792009-09-16 11:50:15 +0200483{
484 struct vm_area_struct *vma;
485 struct task_struct *tsk;
Andi Kleen6a460792009-09-16 11:50:15 +0200486 struct address_space *mapping = page->mapping;
Xianting Tianc43bc032020-10-13 16:54:42 -0700487 pgoff_t pgoff;
Andi Kleen6a460792009-09-16 11:50:15 +0200488
Davidlohr Buesod28eb9c2014-12-12 16:54:36 -0800489 i_mmap_lock_read(mapping);
Peter Zijlstra9b679322011-06-27 16:18:09 -0700490 read_lock(&tasklist_lock);
Xianting Tianc43bc032020-10-13 16:54:42 -0700491 pgoff = page_to_pgoff(page);
Andi Kleen6a460792009-09-16 11:50:15 +0200492 for_each_process(tsk) {
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700493 struct task_struct *t = task_early_kill(tsk, force_early);
Andi Kleen6a460792009-09-16 11:50:15 +0200494
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700495 if (!t)
Andi Kleen6a460792009-09-16 11:50:15 +0200496 continue;
Michel Lespinasse6b2dbba2012-10-08 16:31:25 -0700497 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
Andi Kleen6a460792009-09-16 11:50:15 +0200498 pgoff) {
499 /*
500 * Send early kill signal to tasks where a vma covers
501 * the page but the corrupted page is not necessarily
502 * mapped in its pte.
503 * Assume applications who requested early kill want
504 * to be informed of all such data corruptions.
505 */
Naoya Horiguchi3ba08122014-06-04 16:11:02 -0700506 if (vma->vm_mm == t->mm)
Jane Chu996ff7a2019-11-30 17:53:35 -0800507 add_to_kill(t, page, vma, to_kill);
Andi Kleen6a460792009-09-16 11:50:15 +0200508 }
509 }
Andi Kleen6a460792009-09-16 11:50:15 +0200510 read_unlock(&tasklist_lock);
Davidlohr Buesod28eb9c2014-12-12 16:54:36 -0800511 i_mmap_unlock_read(mapping);
Andi Kleen6a460792009-09-16 11:50:15 +0200512}
513
514/*
515 * Collect the processes who have the corrupted page mapped to kill.
Andi Kleen6a460792009-09-16 11:50:15 +0200516 */
Tony Luck74614de2014-06-04 16:11:01 -0700517static void collect_procs(struct page *page, struct list_head *tokill,
518 int force_early)
Andi Kleen6a460792009-09-16 11:50:15 +0200519{
Andi Kleen6a460792009-09-16 11:50:15 +0200520 if (!page->mapping)
521 return;
522
Andi Kleen6a460792009-09-16 11:50:15 +0200523 if (PageAnon(page))
Jane Chu996ff7a2019-11-30 17:53:35 -0800524 collect_procs_anon(page, tokill, force_early);
Andi Kleen6a460792009-09-16 11:50:15 +0200525 else
Jane Chu996ff7a2019-11-30 17:53:35 -0800526 collect_procs_file(page, tokill, force_early);
Andi Kleen6a460792009-09-16 11:50:15 +0200527}
528
Andi Kleen6a460792009-09-16 11:50:15 +0200529static const char *action_name[] = {
Xie XiuQicc637b12015-06-24 16:57:30 -0700530 [MF_IGNORED] = "Ignored",
531 [MF_FAILED] = "Failed",
532 [MF_DELAYED] = "Delayed",
533 [MF_RECOVERED] = "Recovered",
Naoya Horiguchi64d37a22015-04-15 16:13:05 -0700534};
535
536static const char * const action_page_types[] = {
Xie XiuQicc637b12015-06-24 16:57:30 -0700537 [MF_MSG_KERNEL] = "reserved kernel page",
538 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
539 [MF_MSG_SLAB] = "kernel slab page",
540 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
541 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
542 [MF_MSG_HUGE] = "huge page",
543 [MF_MSG_FREE_HUGE] = "free huge page",
Naoya Horiguchi31286a82018-04-05 16:23:05 -0700544 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
Xie XiuQicc637b12015-06-24 16:57:30 -0700545 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
546 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
547 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
548 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
549 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
550 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
551 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
552 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
553 [MF_MSG_CLEAN_LRU] = "clean LRU page",
554 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
555 [MF_MSG_BUDDY] = "free buddy page",
556 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
Dan Williams6100e342018-07-13 21:50:21 -0700557 [MF_MSG_DAX] = "dax page",
Xie XiuQicc637b12015-06-24 16:57:30 -0700558 [MF_MSG_UNKNOWN] = "unknown page",
Naoya Horiguchi64d37a22015-04-15 16:13:05 -0700559};
560
Andi Kleen6a460792009-09-16 11:50:15 +0200561/*
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100562 * XXX: It is possible that a page is isolated from LRU cache,
563 * and then kept in swap cache or fails to be removed from page cache.
564 * The page count will stop it from being freed by unpoison.
565 * Stress tests should be aware of this memory leak problem.
566 */
567static int delete_from_lru_cache(struct page *p)
568{
569 if (!isolate_lru_page(p)) {
570 /*
571 * Clear sensible page flags, so that the buddy system won't
572 * complain when the page is unpoison-and-freed.
573 */
574 ClearPageActive(p);
575 ClearPageUnevictable(p);
Michal Hocko18365222017-05-12 15:46:26 -0700576
577 /*
578 * Poisoned page might never drop its ref count to 0 so we have
579 * to uncharge it manually from its memcg.
580 */
581 mem_cgroup_uncharge(p);
582
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100583 /*
584 * drop the page count elevated by isolate_lru_page()
585 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300586 put_page(p);
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100587 return 0;
588 }
589 return -EIO;
590}
591
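/*
 * Punch the poisoned page out of the page cache, preferably via the
 * filesystem's ->error_remove_page(), otherwise by invalidating it.
 * Returns MF_RECOVERED on success and MF_FAILED otherwise.
 */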
Naoya Horiguchi78bb9202017-07-10 15:47:50 -0700592static int truncate_error_page(struct page *p, unsigned long pfn,
593 struct address_space *mapping)
594{
595 int ret = MF_FAILED;
596
597 if (mapping->a_ops->error_remove_page) {
598 int err = mapping->a_ops->error_remove_page(mapping, p);
599
600 if (err != 0) {
601 pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
602 pfn, err);
603 } else if (page_has_private(p) &&
604 !try_to_release_page(p, GFP_NOIO)) {
605 pr_info("Memory failure: %#lx: failed to release buffers\n",
606 pfn);
607 } else {
608 ret = MF_RECOVERED;
609 }
610 } else {
611 /*
612 * If the file system doesn't support it just invalidate
613 * This fails on dirty or anything with private pages
614 */
615 if (invalidate_inode_page(p))
616 ret = MF_RECOVERED;
617 else
618 pr_info("Memory failure: %#lx: Failed to invalidate\n",
619 pfn);
620 }
621
622 return ret;
623}
624
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100625/*
Andi Kleen6a460792009-09-16 11:50:15 +0200626 * Error hit kernel page.
627 * Do nothing, try to be lucky and not touch this instead. For a few cases we
628 * could be more sophisticated.
629 */
630static int me_kernel(struct page *p, unsigned long pfn)
631{
Xie XiuQicc637b12015-06-24 16:57:30 -0700632 return MF_IGNORED;
Andi Kleen6a460792009-09-16 11:50:15 +0200633}
634
635/*
636 * Page in unknown state. Do nothing.
637 */
638static int me_unknown(struct page *p, unsigned long pfn)
639{
Chen Yucong495367c02016-05-20 16:57:32 -0700640 pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
Xie XiuQicc637b12015-06-24 16:57:30 -0700641 return MF_FAILED;
Andi Kleen6a460792009-09-16 11:50:15 +0200642}
643
644/*
Andi Kleen6a460792009-09-16 11:50:15 +0200645 * Clean (or cleaned) page cache page.
646 */
647static int me_pagecache_clean(struct page *p, unsigned long pfn)
648{
Andi Kleen6a460792009-09-16 11:50:15 +0200649 struct address_space *mapping;
650
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100651 delete_from_lru_cache(p);
652
Andi Kleen6a460792009-09-16 11:50:15 +0200653 /*
654 * For anonymous pages we're done; the only reference left
655 * should be the one m_f() holds.
656 */
657 if (PageAnon(p))
Xie XiuQicc637b12015-06-24 16:57:30 -0700658 return MF_RECOVERED;
Andi Kleen6a460792009-09-16 11:50:15 +0200659
660 /*
661 * Now truncate the page in the page cache. This is really
662 * more like a "temporary hole punch"
663 * Don't do this for block devices when someone else
664 * has a reference, because it could be file system metadata
665 * and that's not safe to truncate.
666 */
667 mapping = page_mapping(p);
668 if (!mapping) {
669 /*
670 * Page has been torn down in the meanwhile
671 */
Xie XiuQicc637b12015-06-24 16:57:30 -0700672 return MF_FAILED;
Andi Kleen6a460792009-09-16 11:50:15 +0200673 }
674
675 /*
676 * Truncation is a bit tricky. Enable it per file system for now.
677 *
678 * Open: to take i_mutex or not for this? Right now we don't.
679 */
Naoya Horiguchi78bb9202017-07-10 15:47:50 -0700680 return truncate_error_page(p, pfn, mapping);
Andi Kleen6a460792009-09-16 11:50:15 +0200681}
682
683/*
Zhi Yong Wu549543d2014-01-21 15:49:08 -0800684 * Dirty pagecache page
Andi Kleen6a460792009-09-16 11:50:15 +0200685 * Issues: when the error hit a hole page the error is not properly
686 * propagated.
687 */
688static int me_pagecache_dirty(struct page *p, unsigned long pfn)
689{
690 struct address_space *mapping = page_mapping(p);
691
692 SetPageError(p);
693 /* TBD: print more information about the file. */
694 if (mapping) {
695 /*
696 * IO error will be reported by write(), fsync(), etc.
697 * who check the mapping.
698 * This way the application knows that something went
699 * wrong with its dirty file data.
700 *
701 * There's one open issue:
702 *
703 * The EIO will be only reported on the next IO
704 * operation and then cleared through the IO map.
705 * Normally Linux has two mechanisms to pass IO error
706 * first through the AS_EIO flag in the address space
707 * and then through the PageError flag in the page.
708 * Since we drop pages on memory failure handling the
709 * only mechanism open to use is through AS_EIO.
710 *
711 * This has the disadvantage that it gets cleared on
712 * the first operation that returns an error, while
713 * the PageError bit is more sticky and only cleared
714 * when the page is reread or dropped. If an
715 * application assumes it will always get error on
716 * fsync, but does other operations on the fd before
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300717 * and the page is dropped in between, then the error
Andi Kleen6a460792009-09-16 11:50:15 +0200718 * will not be properly reported.
719 *
720 * This can already happen even without hwpoisoned
721 * pages: first on metadata IO errors (which only
722 * report through AS_EIO) or when the page is dropped
723 * at the wrong time.
724 *
725 * So right now we assume that the application DTRT on
726 * the first EIO, but we're not worse than other parts
727 * of the kernel.
728 */
Jeff Laytonaf21bfa2017-07-06 07:02:19 -0400729 mapping_set_error(mapping, -EIO);
Andi Kleen6a460792009-09-16 11:50:15 +0200730 }
731
732 return me_pagecache_clean(p, pfn);
733}
734
735/*
736 * Clean and dirty swap cache.
737 *
738 * Dirty swap cache page is tricky to handle. The page could live both in page
739 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
740 * referenced concurrently by 2 types of PTEs:
741 * normal PTEs and swap PTEs. We try to handle them consistently by calling
742 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
743 * and then
744 * - clear dirty bit to prevent IO
745 * - remove from LRU
746 * - but keep in the swap cache, so that when we return to it on
747 * a later page fault, we know the application is accessing
748 * corrupted data and shall be killed (we installed simple
749 * interception code in do_swap_page to catch it).
750 *
751 * Clean swap cache pages can be directly isolated. A later page fault will
752 * bring in the known good data from disk.
753 */
754static int me_swapcache_dirty(struct page *p, unsigned long pfn)
755{
Andi Kleen6a460792009-09-16 11:50:15 +0200756 ClearPageDirty(p);
757 /* Trigger EIO in shmem: */
758 ClearPageUptodate(p);
759
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100760 if (!delete_from_lru_cache(p))
Xie XiuQicc637b12015-06-24 16:57:30 -0700761 return MF_DELAYED;
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100762 else
Xie XiuQicc637b12015-06-24 16:57:30 -0700763 return MF_FAILED;
Andi Kleen6a460792009-09-16 11:50:15 +0200764}
765
766static int me_swapcache_clean(struct page *p, unsigned long pfn)
767{
Andi Kleen6a460792009-09-16 11:50:15 +0200768 delete_from_swap_cache(p);
Wu Fengguange43c3af2009-09-29 13:16:20 +0800769
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100770 if (!delete_from_lru_cache(p))
Xie XiuQicc637b12015-06-24 16:57:30 -0700771 return MF_RECOVERED;
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +0100772 else
Xie XiuQicc637b12015-06-24 16:57:30 -0700773 return MF_FAILED;
Andi Kleen6a460792009-09-16 11:50:15 +0200774}
775
776/*
777 * Huge pages. Needs work.
778 * Issues:
Naoya Horiguchi93f70f92010-05-28 09:29:20 +0900779 * - Error on hugepage is contained in hugepage unit (not in raw page unit).
780 * To narrow down the kill region to one page, we need to break up the pmd.
Andi Kleen6a460792009-09-16 11:50:15 +0200781 */
782static int me_huge_page(struct page *p, unsigned long pfn)
783{
Naoya Horiguchi6de2b1a2010-09-08 10:19:36 +0900784 int res = 0;
Naoya Horiguchi93f70f92010-05-28 09:29:20 +0900785 struct page *hpage = compound_head(p);
Naoya Horiguchi78bb9202017-07-10 15:47:50 -0700786 struct address_space *mapping;
Naoya Horiguchi2491ffe2015-06-24 16:56:53 -0700787
788 if (!PageHuge(hpage))
789 return MF_DELAYED;
790
Naoya Horiguchi78bb9202017-07-10 15:47:50 -0700791 mapping = page_mapping(hpage);
792 if (mapping) {
793 res = truncate_error_page(hpage, pfn, mapping);
794 } else {
795 unlock_page(hpage);
796 /*
797 * The migration entry prevents later access to the erroneous anonymous
798 * hugepage, so we can free it and dissolve it into the buddy allocator
799 * to save the healthy subpages.
800 */
801 if (PageAnon(hpage))
802 put_page(hpage);
803 dissolve_free_huge_page(p);
804 res = MF_RECOVERED;
805 lock_page(hpage);
Naoya Horiguchi93f70f92010-05-28 09:29:20 +0900806 }
Naoya Horiguchi78bb9202017-07-10 15:47:50 -0700807
808 return res;
Andi Kleen6a460792009-09-16 11:50:15 +0200809}
810
811/*
812 * Various page states we can handle.
813 *
814 * A page state is defined by its current page->flags bits.
815 * The table matches them in order and calls the right handler.
816 *
817 * This is quite tricky because we can access the page at any time
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300818 * in its life cycle, so all accesses have to be extremely careful.
Andi Kleen6a460792009-09-16 11:50:15 +0200819 *
820 * This is not complete. More states could be added.
821 * For any missing state don't attempt recovery.
822 */
823
824#define dirty (1UL << PG_dirty)
Nicholas Piggin6326fec2016-12-25 13:00:29 +1000825#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
Andi Kleen6a460792009-09-16 11:50:15 +0200826#define unevict (1UL << PG_unevictable)
827#define mlock (1UL << PG_mlocked)
Andi Kleen6a460792009-09-16 11:50:15 +0200828#define lru (1UL << PG_lru)
Andi Kleen6a460792009-09-16 11:50:15 +0200829#define head (1UL << PG_head)
Andi Kleen6a460792009-09-16 11:50:15 +0200830#define slab (1UL << PG_slab)
Andi Kleen6a460792009-09-16 11:50:15 +0200831#define reserved (1UL << PG_reserved)
832
833static struct page_state {
834 unsigned long mask;
835 unsigned long res;
Xie XiuQicc637b12015-06-24 16:57:30 -0700836 enum mf_action_page_type type;
Andi Kleen6a460792009-09-16 11:50:15 +0200837 int (*action)(struct page *p, unsigned long pfn);
838} error_states[] = {
Xie XiuQicc637b12015-06-24 16:57:30 -0700839 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
Wu Fengguang95d01fc2009-12-16 12:19:58 +0100840 /*
841 * free pages are specially detected outside this table:
842 * PG_buddy pages only make up a small fraction of all free pages.
843 */
Andi Kleen6a460792009-09-16 11:50:15 +0200844
845 /*
846 * Could in theory check if slab page is free or if we can drop
847 * currently unused objects without touching them. But just
848 * treat it as standard kernel for now.
849 */
Xie XiuQicc637b12015-06-24 16:57:30 -0700850 { slab, slab, MF_MSG_SLAB, me_kernel },
Andi Kleen6a460792009-09-16 11:50:15 +0200851
Xie XiuQicc637b12015-06-24 16:57:30 -0700852 { head, head, MF_MSG_HUGE, me_huge_page },
Andi Kleen6a460792009-09-16 11:50:15 +0200853
Xie XiuQicc637b12015-06-24 16:57:30 -0700854 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
855 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
Andi Kleen6a460792009-09-16 11:50:15 +0200856
Xie XiuQicc637b12015-06-24 16:57:30 -0700857 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
858 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
Andi Kleen6a460792009-09-16 11:50:15 +0200859
Xie XiuQicc637b12015-06-24 16:57:30 -0700860 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
861 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
Naoya Horiguchi5f4b9fc2013-02-22 16:35:53 -0800862
Xie XiuQicc637b12015-06-24 16:57:30 -0700863 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
864 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
Andi Kleen6a460792009-09-16 11:50:15 +0200865
866 /*
867 * Catchall entry: must be at end.
868 */
Xie XiuQicc637b12015-06-24 16:57:30 -0700869 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
Andi Kleen6a460792009-09-16 11:50:15 +0200870};
871
Andi Kleen2326c462009-12-16 12:20:00 +0100872#undef dirty
873#undef sc
874#undef unevict
875#undef mlock
Andi Kleen2326c462009-12-16 12:20:00 +0100876#undef lru
Andi Kleen2326c462009-12-16 12:20:00 +0100877#undef head
Andi Kleen2326c462009-12-16 12:20:00 +0100878#undef slab
879#undef reserved
880
Naoya Horiguchiff604cf2012-12-11 16:01:32 -0800881/*
882 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
883 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
884 */
Xie XiuQicc3e2af2015-06-24 16:57:33 -0700885static void action_result(unsigned long pfn, enum mf_action_page_type type,
886 enum mf_result result)
Andi Kleen6a460792009-09-16 11:50:15 +0200887{
Xie XiuQi97f0b132015-06-24 16:57:36 -0700888 trace_memory_failure_event(pfn, type, result);
889
Chen Yucong495367c02016-05-20 16:57:32 -0700890 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
Naoya Horiguchi64d37a22015-04-15 16:13:05 -0700891 pfn, action_page_types[type], action_name[result]);
Andi Kleen6a460792009-09-16 11:50:15 +0200892}
893
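/*
 * Run the handler for the matched page state, verify that the page
 * refcount afterwards is what we expect, and report the outcome.
 */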
894static int page_action(struct page_state *ps, struct page *p,
Wu Fengguangbd1ce5f2009-12-16 12:19:57 +0100895 unsigned long pfn)
Andi Kleen6a460792009-09-16 11:50:15 +0200896{
897 int result;
Wu Fengguang7456b042009-10-19 08:15:01 +0200898 int count;
Andi Kleen6a460792009-09-16 11:50:15 +0200899
900 result = ps->action(p, pfn);
Wu Fengguang7456b042009-10-19 08:15:01 +0200901
Wu Fengguangbd1ce5f2009-12-16 12:19:57 +0100902 count = page_count(p) - 1;
Xie XiuQicc637b12015-06-24 16:57:30 -0700903 if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
Wu Fengguang138ce282009-12-16 12:19:58 +0100904 count--;
Naoya Horiguchi78bb9202017-07-10 15:47:50 -0700905 if (count > 0) {
Chen Yucong495367c02016-05-20 16:57:32 -0700906 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
Naoya Horiguchi64d37a22015-04-15 16:13:05 -0700907 pfn, action_page_types[ps->type], count);
Xie XiuQicc637b12015-06-24 16:57:30 -0700908 result = MF_FAILED;
Wu Fengguang138ce282009-12-16 12:19:58 +0100909 }
Naoya Horiguchi64d37a22015-04-15 16:13:05 -0700910 action_result(pfn, ps->type, result);
Andi Kleen6a460792009-09-16 11:50:15 +0200911
912 /* Could do more checks here if page looks ok */
913 /*
914 * Could adjust zone counters here to correct for the missing page.
915 */
916
Xie XiuQicc637b12015-06-24 16:57:30 -0700917 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
Andi Kleen6a460792009-09-16 11:50:15 +0200918}
919
Naoya Horiguchiead07f62015-06-24 16:56:48 -0700920/**
921 * get_hwpoison_page() - Get refcount for memory error handling:
922 * @page: raw error page (hit by memory error)
923 *
924 * Return: 0 if we failed to grab the refcount, otherwise true (some
925 * non-zero value).
926 */
Oscar Salvador7e27f222020-10-15 20:06:50 -0700927static int get_hwpoison_page(struct page *page)
Naoya Horiguchiead07f62015-06-24 16:56:48 -0700928{
929 struct page *head = compound_head(page);
930
Naoya Horiguchi4e41a302016-01-15 16:54:07 -0800931 if (!PageHuge(head) && PageTransHuge(head)) {
Naoya Horiguchi98ed2b02015-08-06 15:47:04 -0700932 /*
933 * Non-anonymous thp exist only at allocation/free time. We
934 * can't handle such a case correctly, so let's give up.
935 * This should be better than triggering BUG_ON when the kernel
936 * tries to touch the "partially handled" page.
937 */
938 if (!PageAnon(head)) {
Chen Yucong495367c02016-05-20 16:57:32 -0700939 pr_err("Memory failure: %#lx: non anonymous thp\n",
Naoya Horiguchi98ed2b02015-08-06 15:47:04 -0700940 page_to_pfn(page));
941 return 0;
942 }
Naoya Horiguchiead07f62015-06-24 16:56:48 -0700943 }
944
Konstantin Khlebnikovc2e7e002016-04-28 16:19:03 -0700945 if (get_page_unless_zero(head)) {
946 if (head == compound_head(page))
947 return 1;
948
Chen Yucong495367c02016-05-20 16:57:32 -0700949 pr_info("Memory failure: %#lx cannot catch tail\n",
950 page_to_pfn(page));
Konstantin Khlebnikovc2e7e002016-04-28 16:19:03 -0700951 put_page(head);
952 }
953
954 return 0;
Naoya Horiguchiead07f62015-06-24 16:56:48 -0700955}
Naoya Horiguchiead07f62015-06-24 16:56:48 -0700956
Andi Kleen6a460792009-09-16 11:50:15 +0200957/*
958 * Do all that is necessary to remove user space mappings. Unmap
959 * the pages and send SIGBUS to the processes if the data was dirty.
960 */
Minchan Kim666e5a42017-05-03 14:54:20 -0700961static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
Eric W. Biederman83b57532017-07-09 18:14:01 -0500962 int flags, struct page **hpagep)
Andi Kleen6a460792009-09-16 11:50:15 +0200963{
Shaohua Lia128ca72017-05-03 14:52:22 -0700964 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
Andi Kleen6a460792009-09-16 11:50:15 +0200965 struct address_space *mapping;
966 LIST_HEAD(tokill);
Mike Kravetzc0d03812020-04-01 21:11:05 -0700967 bool unmap_success = true;
Tony Luck6751ed62012-07-11 10:20:47 -0700968 int kill = 1, forcekill;
Naoya Horiguchi54b9dd12014-01-23 15:53:14 -0800969 struct page *hpage = *hpagep;
Naoya Horiguchi286c4692017-05-03 14:56:22 -0700970 bool mlocked = PageMlocked(hpage);
Andi Kleen6a460792009-09-16 11:50:15 +0200971
Naoya Horiguchi93a9eb32014-07-30 16:08:28 -0700972 /*
973 * Here we are interested only in user-mapped pages, so skip any
974 * other types of pages.
975 */
976 if (PageReserved(p) || PageSlab(p))
Minchan Kim666e5a42017-05-03 14:54:20 -0700977 return true;
Naoya Horiguchi93a9eb32014-07-30 16:08:28 -0700978 if (!(PageLRU(hpage) || PageHuge(p)))
Minchan Kim666e5a42017-05-03 14:54:20 -0700979 return true;
Andi Kleen6a460792009-09-16 11:50:15 +0200980
Andi Kleen6a460792009-09-16 11:50:15 +0200981 /*
982 * This check implies we don't kill processes if their pages
983 * are in the swap cache early. Those are always late kills.
984 */
Naoya Horiguchi7af446a2010-05-28 09:29:17 +0900985 if (!page_mapped(hpage))
Minchan Kim666e5a42017-05-03 14:54:20 -0700986 return true;
Wu Fengguang1668bfd2009-12-16 12:19:58 +0100987
Naoya Horiguchi52089b12014-07-30 16:08:30 -0700988 if (PageKsm(p)) {
Chen Yucong495367c02016-05-20 16:57:32 -0700989 pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
Minchan Kim666e5a42017-05-03 14:54:20 -0700990 return false;
Naoya Horiguchi52089b12014-07-30 16:08:30 -0700991 }
Andi Kleen6a460792009-09-16 11:50:15 +0200992
993 if (PageSwapCache(p)) {
Chen Yucong495367c02016-05-20 16:57:32 -0700994 pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
995 pfn);
Andi Kleen6a460792009-09-16 11:50:15 +0200996 ttu |= TTU_IGNORE_HWPOISON;
997 }
998
999 /*
1000 * Propagate the dirty bit from PTEs to struct page first, because we
1001 * need this to decide if we should kill or just drop the page.
Wu Fengguangdb0480b2009-12-16 12:19:58 +01001002 * XXX: the dirty test could be racy: set_page_dirty() may not always
1003 * be called inside page lock (it's recommended but not enforced).
Andi Kleen6a460792009-09-16 11:50:15 +02001004 */
Naoya Horiguchi7af446a2010-05-28 09:29:17 +09001005 mapping = page_mapping(hpage);
Tony Luck6751ed62012-07-11 10:20:47 -07001006 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
Christoph Hellwigf56753a2020-09-24 08:51:40 +02001007 mapping_can_writeback(mapping)) {
Naoya Horiguchi7af446a2010-05-28 09:29:17 +09001008 if (page_mkclean(hpage)) {
1009 SetPageDirty(hpage);
Andi Kleen6a460792009-09-16 11:50:15 +02001010 } else {
1011 kill = 0;
1012 ttu |= TTU_IGNORE_HWPOISON;
Chen Yucong495367c02016-05-20 16:57:32 -07001013 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
Andi Kleen6a460792009-09-16 11:50:15 +02001014 pfn);
1015 }
1016 }
1017
Jin Dongminga6d30dd2011-02-01 15:52:40 -08001018 /*
Andi Kleen6a460792009-09-16 11:50:15 +02001019 * First collect all the processes that have the page
1020 * mapped in dirty form. This has to be done before try_to_unmap,
1021 * because ttu takes the rmap data structures down.
1022 *
1023 * Error handling: We ignore errors here because
1024 * there's nothing that can be done.
1025 */
1026 if (kill)
Naoya Horiguchi415c64c2015-06-24 16:56:45 -07001027 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
Andi Kleen6a460792009-09-16 11:50:15 +02001028
Mike Kravetzc0d03812020-04-01 21:11:05 -07001029 if (!PageHuge(hpage)) {
1030 unmap_success = try_to_unmap(hpage, ttu);
1031 } else {
1032 /*
1033 * For hugetlb pages, try_to_unmap could potentially call
1034 * huge_pmd_unshare. Because of this, take semaphore in
1035 * write mode here and set TTU_RMAP_LOCKED to indicate we
1036 * have taken the lock at this higher level.
1037 *
1038 * Note that the call to hugetlb_page_mapping_lock_write
1039 * is necessary even if mapping is already set. It handles
1040 * ugliness of potentially having to drop page lock to obtain
1041 * i_mmap_rwsem.
1042 */
1043 mapping = hugetlb_page_mapping_lock_write(hpage);
1044
1045 if (mapping) {
1046 unmap_success = try_to_unmap(hpage,
1047 ttu|TTU_RMAP_LOCKED);
1048 i_mmap_unlock_write(mapping);
1049 } else {
1050 pr_info("Memory failure: %#lx: could not find mapping for mapped huge page\n",
1051 pfn);
1052 unmap_success = false;
1053 }
1054 }
Minchan Kim666e5a42017-05-03 14:54:20 -07001055 if (!unmap_success)
Chen Yucong495367c02016-05-20 16:57:32 -07001056 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
Joe Perches11705322016-03-17 14:19:50 -07001057 pfn, page_mapcount(hpage));
Jin Dongminga6d30dd2011-02-01 15:52:40 -08001058
Andi Kleen6a460792009-09-16 11:50:15 +02001059 /*
Naoya Horiguchi286c4692017-05-03 14:56:22 -07001060 * try_to_unmap() might put mlocked page in lru cache, so call
1061 * shake_page() again to ensure that it's flushed.
1062 */
1063 if (mlocked)
1064 shake_page(hpage, 0);
1065
1066 /*
Andi Kleen6a460792009-09-16 11:50:15 +02001067 * Now that the dirty bit has been propagated to the
1068 * struct page and all unmaps done we can decide if
1069 * killing is needed or not. Only kill when the page
Tony Luck6751ed62012-07-11 10:20:47 -07001070 * was dirty or the process is not restartable,
1071 * otherwise the tokill list is merely
Andi Kleen6a460792009-09-16 11:50:15 +02001072 * freed. When there was a problem unmapping earlier
1073 * use a more forceful uncatchable kill to prevent
1074 * any accesses to the poisoned memory.
1075 */
Naoya Horiguchi415c64c2015-06-24 16:56:45 -07001076 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
Dan Williamsae1139e2018-07-13 21:50:11 -07001077 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
Wu Fengguang1668bfd2009-12-16 12:19:58 +01001078
Minchan Kim666e5a42017-05-03 14:54:20 -07001079 return unmap_success;
Andi Kleen6a460792009-09-16 11:50:15 +02001080}
1081
Naoya Horiguchi0348d2e2017-07-10 15:47:56 -07001082static int identify_page_state(unsigned long pfn, struct page *p,
1083 unsigned long page_flags)
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001084{
1085 struct page_state *ps;
Naoya Horiguchi0348d2e2017-07-10 15:47:56 -07001086
1087 /*
1088 * The first check uses the current page flags which may not have any
1089 * relevant information. The second check with the saved page flags is
1090 * carried out only if the first check can't determine the page status.
1091 */
1092 for (ps = error_states;; ps++)
1093 if ((p->flags & ps->mask) == ps->res)
1094 break;
1095
1096 page_flags |= (p->flags & (1UL << PG_dirty));
1097
1098 if (!ps->mask)
1099 for (ps = error_states;; ps++)
1100 if ((page_flags & ps->mask) == ps->res)
1101 break;
1102 return page_action(ps, p, pfn);
1103}
1104
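/*
 * Hugetlb counterpart of memory_failure(): poison the whole compound page,
 * unmap it from (and possibly kill) every process mapping it, then handle
 * the page according to its state.
 */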
Eric W. Biederman83b57532017-07-09 18:14:01 -05001105static int memory_failure_hugetlb(unsigned long pfn, int flags)
Naoya Horiguchi0348d2e2017-07-10 15:47:56 -07001106{
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001107 struct page *p = pfn_to_page(pfn);
1108 struct page *head = compound_head(p);
1109 int res;
1110 unsigned long page_flags;
1111
1112 if (TestSetPageHWPoison(head)) {
1113 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1114 pfn);
1115 return 0;
1116 }
1117
1118 num_poisoned_pages_inc();
1119
1120 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1121 /*
1122 * Check "filter hit" and "race with other subpage."
1123 */
1124 lock_page(head);
1125 if (PageHWPoison(head)) {
1126 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1127 || (p != head && TestSetPageHWPoison(head))) {
1128 num_poisoned_pages_dec();
1129 unlock_page(head);
1130 return 0;
1131 }
1132 }
1133 unlock_page(head);
1134 dissolve_free_huge_page(p);
1135 action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED);
1136 return 0;
1137 }
1138
1139 lock_page(head);
1140 page_flags = head->flags;
1141
1142 if (!PageHWPoison(head)) {
1143 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1144 num_poisoned_pages_dec();
1145 unlock_page(head);
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001146 put_page(head);
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001147 return 0;
1148 }
1149
Naoya Horiguchi31286a82018-04-05 16:23:05 -07001150 /*
1151 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
1152 * simply disable it. In order to make it work properly, we need to
1153 * make sure that:
1154 * - conversion of a pud that maps an error hugetlb into hwpoison
1155 * entry properly works, and
1156 * - other mm code walking over page table is aware of pud-aligned
1157 * hwpoison entries.
1158 */
1159 if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
1160 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
1161 res = -EBUSY;
1162 goto out;
1163 }
1164
Eric W. Biederman83b57532017-07-09 18:14:01 -05001165 if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001166 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1167 res = -EBUSY;
1168 goto out;
1169 }
1170
Naoya Horiguchi0348d2e2017-07-10 15:47:56 -07001171 res = identify_page_state(pfn, p, page_flags);
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001172out:
1173 unlock_page(head);
1174 return res;
1175}
1176
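/*
 * Handle a memory error in ZONE_DEVICE (e.g. fsdax/device-dax) memory:
 * such pages can never be re-backed by a different physical page, so the
 * poisoned pfn is unmapped and every mapping process gets a SIGBUS
 * (MF_MUST_KILL).
 */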
Dan Williams6100e342018-07-13 21:50:21 -07001177static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
1178 struct dev_pagemap *pgmap)
1179{
1180 struct page *page = pfn_to_page(pfn);
1181 const bool unmap_success = true;
1182 unsigned long size = 0;
1183 struct to_kill *tk;
1184 LIST_HEAD(tokill);
1185 int rc = -EBUSY;
1186 loff_t start;
Matthew Wilcox27359fd2018-11-30 11:05:06 -05001187 dax_entry_t cookie;
Dan Williams6100e342018-07-13 21:50:21 -07001188
1189 /*
1190 * Prevent the inode from being freed while we are interrogating
1191 * the address_space, typically this would be handled by
1192 * lock_page(), but dax pages do not use the page lock. This
1193 * also prevents changes to the mapping of this pfn until
1194 * poison signaling is complete.
1195 */
Matthew Wilcox27359fd2018-11-30 11:05:06 -05001196 cookie = dax_lock_page(page);
1197 if (!cookie)
Dan Williams6100e342018-07-13 21:50:21 -07001198 goto out;
1199
1200 if (hwpoison_filter(page)) {
1201 rc = 0;
1202 goto unlock;
1203 }
1204
Christoph Hellwig25b29952019-06-13 22:50:49 +02001205 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
Dan Williams6100e342018-07-13 21:50:21 -07001206 /*
1207 * TODO: Handle HMM pages which may need coordination
1208 * with device-side memory.
1209 */
1210 goto unlock;
Dan Williams6100e342018-07-13 21:50:21 -07001211 }
1212
1213 /*
1214 * Use this flag as an indication that the dax page has been
1215 * remapped UC to prevent speculative consumption of poison.
1216 */
1217 SetPageHWPoison(page);
1218
1219 /*
1220 * Unlike System-RAM there is no possibility to swap in a
1221 * different physical page at a given virtual address, so all
1222 * userspace consumption of ZONE_DEVICE memory necessitates
1223 * SIGBUS (i.e. MF_MUST_KILL)
1224 */
1225 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1226 collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
1227
1228 list_for_each_entry(tk, &tokill, nd)
1229 if (tk->size_shift)
1230 size = max(size, 1UL << tk->size_shift);
1231 if (size) {
1232 /*
1233 * Unmap the largest mapping to avoid breaking up
1234 * device-dax mappings which are constant size. The
1235 * actual size of the mapping being torn down is
1236 * communicated in siginfo, see kill_proc()
1237 */
1238 start = (page->index << PAGE_SHIFT) & ~(size - 1);
1239 unmap_mapping_range(page->mapping, start, start + size, 0);
1240 }
1241 kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
1242 rc = 0;
1243unlock:
Matthew Wilcox27359fd2018-11-30 11:05:06 -05001244 dax_unlock_page(page, cookie);
Dan Williams6100e342018-07-13 21:50:21 -07001245out:
1246 /* drop pgmap ref acquired in caller */
1247 put_dev_pagemap(pgmap);
1248 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
1249 return rc;
1250}
1251
Tony Luckcd42f4a2011-12-15 10:48:12 -08001252/**
1253 * memory_failure - Handle memory failure of a page.
1254 * @pfn: Page Number of the corrupted page
Tony Luckcd42f4a2011-12-15 10:48:12 -08001255 * @flags: fine tune action taken
1256 *
1257 * This function is called by the low level machine check code
1258 * of an architecture when it detects hardware memory corruption
1259 * of a page. It tries its best to recover, which includes
1260 * dropping pages, killing processes etc.
1261 *
1262 * The function is primarily of use for corruptions that
1263 * happen outside the current execution context (e.g. when
1264 * detected by a background scrubber)
1265 *
1266 * Must run in process context (e.g. a work queue) with interrupts
1267 * enabled and no spinlocks held.
1268 */
Eric W. Biederman83b57532017-07-09 18:14:01 -05001269int memory_failure(unsigned long pfn, int flags)
Andi Kleen6a460792009-09-16 11:50:15 +02001270{
Andi Kleen6a460792009-09-16 11:50:15 +02001271 struct page *p;
Naoya Horiguchi7af446a2010-05-28 09:29:17 +09001272 struct page *hpage;
Naoya Horiguchi415c64c2015-06-24 16:56:45 -07001273 struct page *orig_head;
Dan Williams6100e342018-07-13 21:50:21 -07001274 struct dev_pagemap *pgmap;
Andi Kleen6a460792009-09-16 11:50:15 +02001275 int res;
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001276 unsigned long page_flags;
Andi Kleen6a460792009-09-16 11:50:15 +02001277
1278 if (!sysctl_memory_failure_recovery)
Eric W. Biederman83b57532017-07-09 18:14:01 -05001279 panic("Memory failure on page %lx", pfn);
Andi Kleen6a460792009-09-16 11:50:15 +02001280
David Hildenbrand96c804a2019-10-18 20:19:23 -07001281 p = pfn_to_online_page(pfn);
1282 if (!p) {
1283 if (pfn_valid(pfn)) {
1284 pgmap = get_dev_pagemap(pfn, NULL);
1285 if (pgmap)
1286 return memory_failure_dev_pagemap(pfn, flags,
1287 pgmap);
1288 }
Chen Yucong495367c02016-05-20 16:57:32 -07001289 pr_err("Memory failure: %#lx: memory outside kernel control\n",
1290 pfn);
Wu Fengguanga7560fc2009-12-16 12:19:57 +01001291 return -ENXIO;
Andi Kleen6a460792009-09-16 11:50:15 +02001292 }
1293
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001294 if (PageHuge(p))
Eric W. Biederman83b57532017-07-09 18:14:01 -05001295 return memory_failure_hugetlb(pfn, flags);
Andi Kleen6a460792009-09-16 11:50:15 +02001296 if (TestSetPageHWPoison(p)) {
Chen Yucong495367c02016-05-20 16:57:32 -07001297 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1298 pfn);
Andi Kleen6a460792009-09-16 11:50:15 +02001299 return 0;
1300 }
1301
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001302 orig_head = hpage = compound_head(p);
Naoya Horiguchib37ff712017-07-10 15:47:38 -07001303 num_poisoned_pages_inc();
Andi Kleen6a460792009-09-16 11:50:15 +02001304
1305 /*
1306	 * There is nothing we need to (or can) do about count=0 pages.
1307	 * 1) it's a free page, and therefore in safe hands:
1308	 *    prep_new_page() will be the gatekeeper.
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001309 * 2) it's part of a non-compound high order page.
Andi Kleen6a460792009-09-16 11:50:15 +02001310 * Implies some kernel user: cannot stop them from
1311 * R/W the page; let's pray that the page has been
1312 * used and will be freed some time later.
1313 * In fact it's dangerous to directly bump up page count from 0,
Jiang Biao1c4c3b92018-08-21 21:53:13 -07001314	 * as that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
Andi Kleen6a460792009-09-16 11:50:15 +02001315 */
Naoya Horiguchiead07f62015-06-24 16:56:48 -07001316 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
Wu Fengguang8d22ba12009-12-16 12:19:58 +01001317 if (is_free_buddy_page(p)) {
Xie XiuQicc637b12015-06-24 16:57:30 -07001318 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
Wu Fengguang8d22ba12009-12-16 12:19:58 +01001319 return 0;
1320 } else {
Xie XiuQicc637b12015-06-24 16:57:30 -07001321 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
Wu Fengguang8d22ba12009-12-16 12:19:58 +01001322 return -EBUSY;
1323 }
Andi Kleen6a460792009-09-16 11:50:15 +02001324 }
1325
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001326 if (PageTransHuge(hpage)) {
Naoya Horiguchic3901e72016-11-10 10:46:23 -08001327 lock_page(p);
1328 if (!PageAnon(p) || unlikely(split_huge_page(p))) {
1329 unlock_page(p);
1330 if (!PageAnon(p))
Chen Yucong495367c02016-05-20 16:57:32 -07001331 pr_err("Memory failure: %#lx: non anonymous thp\n",
1332 pfn);
Wanpeng Li7f6bf392015-08-14 15:35:08 -07001333 else
Chen Yucong495367c02016-05-20 16:57:32 -07001334 pr_err("Memory failure: %#lx: thp split failed\n",
1335 pfn);
Naoya Horiguchiead07f62015-06-24 16:56:48 -07001336 if (TestClearPageHWPoison(p))
Naoya Horiguchib37ff712017-07-10 15:47:38 -07001337 num_poisoned_pages_dec();
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001338 put_page(p);
Naoya Horiguchi415c64c2015-06-24 16:56:45 -07001339 return -EBUSY;
1340 }
Naoya Horiguchic3901e72016-11-10 10:46:23 -08001341 unlock_page(p);
Naoya Horiguchi415c64c2015-06-24 16:56:45 -07001342 VM_BUG_ON_PAGE(!page_count(p), p);
Naoya Horiguchi415c64c2015-06-24 16:56:45 -07001343 }
1344
Andi Kleen6a460792009-09-16 11:50:15 +02001345 /*
Wu Fengguange43c3af2009-09-29 13:16:20 +08001346 * We ignore non-LRU pages for good reasons.
1347 * - PG_locked is only well defined for LRU pages and a few others
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08001348 * - to avoid races with __SetPageLocked()
Wu Fengguange43c3af2009-09-29 13:16:20 +08001349 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
1350 * The check (unnecessarily) ignores LRU pages being isolated and
1351 * walked by the page reclaim code, however that's not a big loss.
1352 */
Naoya Horiguchi8bcb74d2017-05-03 14:56:19 -07001353 shake_page(p, 0);
1354 /* shake_page could have turned it free. */
1355 if (!PageLRU(p) && is_free_buddy_page(p)) {
1356 if (flags & MF_COUNT_INCREASED)
1357 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1358 else
1359 action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
1360 return 0;
Wu Fengguange43c3af2009-09-29 13:16:20 +08001361 }
Wu Fengguange43c3af2009-09-29 13:16:20 +08001362
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001363 lock_page(p);
Wu Fengguang847ce402009-12-16 12:19:58 +01001364
1365 /*
Andi Kleenf37d4292014-08-06 16:06:49 -07001366	 * The page could have become part of a different compound page while
1367	 * we were waiting for the page lock. If this happens, just bail out.
1368 */
Naoya Horiguchi415c64c2015-06-24 16:56:45 -07001369 if (PageCompound(p) && compound_head(p) != orig_head) {
Xie XiuQicc637b12015-06-24 16:57:30 -07001370 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
Andi Kleenf37d4292014-08-06 16:06:49 -07001371 res = -EBUSY;
1372 goto out;
1373 }
1374
1375 /*
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001376 * We use page flags to determine what action should be taken, but
1377 * the flags can be modified by the error containment action. One
1378 * example is an mlocked page, where PG_mlocked is cleared by
1379 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
1380 * correctly, we save a copy of the page flags at this time.
1381 */
Naoya Horiguchi7d9d46a2020-10-15 20:06:38 -07001382 page_flags = p->flags;
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001383
1384 /*
Wu Fengguang847ce402009-12-16 12:19:58 +01001385 * unpoison always clear PG_hwpoison inside page lock
1386 */
1387 if (!PageHWPoison(p)) {
Chen Yucong495367c02016-05-20 16:57:32 -07001388 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
Naoya Horiguchib37ff712017-07-10 15:47:38 -07001389 num_poisoned_pages_dec();
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001390 unlock_page(p);
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001391 put_page(p);
Naoya Horiguchia09233f2015-08-06 15:46:58 -07001392 return 0;
Wu Fengguang847ce402009-12-16 12:19:58 +01001393 }
Wu Fengguang7c116f22009-12-16 12:19:59 +01001394 if (hwpoison_filter(p)) {
1395 if (TestClearPageHWPoison(p))
Naoya Horiguchib37ff712017-07-10 15:47:38 -07001396 num_poisoned_pages_dec();
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001397 unlock_page(p);
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001398 put_page(p);
Wu Fengguang7c116f22009-12-16 12:19:59 +01001399 return 0;
1400 }
Wu Fengguang847ce402009-12-16 12:19:58 +01001401
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001402 if (!PageTransTail(p) && !PageLRU(p))
Chen Yucong0bc1f8b2014-07-02 15:22:37 -07001403 goto identify_page_state;
1404
Naoya Horiguchi7013feb2010-05-28 09:29:18 +09001405 /*
Naoya Horiguchi6edd6cc2014-06-04 16:10:35 -07001406	 * It's very difficult (and in many cases impossible) to mess with
1407	 * pages that are currently under IO, so we just avoid it here.
1408 */
Andi Kleen6a460792009-09-16 11:50:15 +02001409 wait_on_page_writeback(p);
1410
1411 /*
1412 * Now take care of user space mappings.
Minchan Kime64a7822011-03-22 16:32:44 -07001413 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
Andi Kleen6a460792009-09-16 11:50:15 +02001414 */
Naoya Horiguchi1b473bec2020-10-15 20:06:42 -07001415 if (!hwpoison_user_mappings(p, pfn, flags, &p)) {
Xie XiuQicc637b12015-06-24 16:57:30 -07001416 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
Wu Fengguang1668bfd2009-12-16 12:19:58 +01001417 res = -EBUSY;
1418 goto out;
1419 }
Andi Kleen6a460792009-09-16 11:50:15 +02001420
1421 /*
1422 * Torn down by someone else?
1423 */
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +01001424 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
Xie XiuQicc637b12015-06-24 16:57:30 -07001425 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
Wu Fengguangd95ea512009-12-16 12:19:58 +01001426 res = -EBUSY;
Andi Kleen6a460792009-09-16 11:50:15 +02001427 goto out;
1428 }
1429
Chen Yucong0bc1f8b2014-07-02 15:22:37 -07001430identify_page_state:
Naoya Horiguchi0348d2e2017-07-10 15:47:56 -07001431 res = identify_page_state(pfn, p, page_flags);
Andi Kleen6a460792009-09-16 11:50:15 +02001432out:
Naoya Horiguchi761ad8d2017-07-10 15:47:47 -07001433 unlock_page(p);
Andi Kleen6a460792009-09-16 11:50:15 +02001434 return res;
1435}
Tony Luckcd42f4a2011-12-15 10:48:12 -08001436EXPORT_SYMBOL_GPL(memory_failure);
Wu Fengguang847ce402009-12-16 12:19:58 +01001437
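/*
 * Caller sketch (illustrative only; the real callers are the x86 machine
 * check code and the APEI/GHES handlers): from process context, e.g. a
 * work queue item scheduled by the architecture's error handler
 * (phys_addr and synchronous_fault are placeholder names):
 *
 *	unsigned long pfn = phys_addr >> PAGE_SHIFT;
 *	int flags = synchronous_fault ? MF_ACTION_REQUIRED : 0;
 *
 *	memory_failure(pfn, flags);
 *
 * MF_ACTION_REQUIRED is meant for errors hit by the current execution
 * context (instruction fetch/data load), where the affected task must get
 * a synchronous SIGBUS rather than continue on the poisoned data.
 */
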
Huang Yingea8f5fb2011-07-13 13:14:27 +08001438#define MEMORY_FAILURE_FIFO_ORDER 4
1439#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
1440
1441struct memory_failure_entry {
1442 unsigned long pfn;
Huang Yingea8f5fb2011-07-13 13:14:27 +08001443 int flags;
1444};
1445
1446struct memory_failure_cpu {
1447 DECLARE_KFIFO(fifo, struct memory_failure_entry,
1448 MEMORY_FAILURE_FIFO_SIZE);
1449 spinlock_t lock;
1450 struct work_struct work;
1451};
1452
1453static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
1454
1455/**
1456 * memory_failure_queue - Schedule handling memory failure of a page.
1457 * @pfn: Page Number of the corrupted page
Huang Yingea8f5fb2011-07-13 13:14:27 +08001458 * @flags: Flags for memory failure handling
1459 *
1460 * This function is called by the low level hardware error handler
1461 * when it detects hardware memory corruption of a page. It schedules
1462 * recovery of the error page, which includes dropping pages, killing
1463 * processes etc.
1464 *
1465 * The function is primarily of use for corruptions that
1466 * happen outside the current execution context (e.g. when
1467 * detected by a background scrubber)
1468 *
1469 * Can run in IRQ context.
1470 */
Eric W. Biederman83b57532017-07-09 18:14:01 -05001471void memory_failure_queue(unsigned long pfn, int flags)
Huang Yingea8f5fb2011-07-13 13:14:27 +08001472{
1473 struct memory_failure_cpu *mf_cpu;
1474 unsigned long proc_flags;
1475 struct memory_failure_entry entry = {
1476 .pfn = pfn,
Huang Yingea8f5fb2011-07-13 13:14:27 +08001477 .flags = flags,
1478 };
1479
1480 mf_cpu = &get_cpu_var(memory_failure_cpu);
1481 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
Stefani Seibold498d3192013-11-14 14:32:17 -08001482 if (kfifo_put(&mf_cpu->fifo, entry))
Huang Yingea8f5fb2011-07-13 13:14:27 +08001483 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1484 else
Joe Perches8e33a522013-07-25 11:53:25 -07001485 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
Huang Yingea8f5fb2011-07-13 13:14:27 +08001486 pfn);
1487 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1488 put_cpu_var(memory_failure_cpu);
1489}
1490EXPORT_SYMBOL_GPL(memory_failure_queue);
1491
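/*
 * Caller sketch for memory_failure_queue() (illustrative; the real users
 * are the x86 MCE handler and APEI/GHES).  From interrupt or exception
 * context only the queueing is done, never memory_failure() itself
 * (error_address_is_usable and err_paddr are placeholders):
 *
 *	// inside the machine check / error interrupt handler
 *	if (error_address_is_usable)
 *		memory_failure_queue(err_paddr >> PAGE_SHIFT, MF_ACTION_REQUIRED);
 *
 * The per-cpu kfifo plus schedule_work_on() in memory_failure_queue()
 * above keeps the slow recovery work out of the error-handling context.
 */
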
1492static void memory_failure_work_func(struct work_struct *work)
1493{
1494 struct memory_failure_cpu *mf_cpu;
1495 struct memory_failure_entry entry = { 0, };
1496 unsigned long proc_flags;
1497 int gotten;
1498
James Morse06202232020-05-01 17:45:41 +01001499 mf_cpu = container_of(work, struct memory_failure_cpu, work);
Huang Yingea8f5fb2011-07-13 13:14:27 +08001500 for (;;) {
1501 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1502 gotten = kfifo_get(&mf_cpu->fifo, &entry);
1503 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1504 if (!gotten)
1505 break;
Naveen N. Raocf870c72013-07-10 14:57:01 +05301506 if (entry.flags & MF_SOFT_OFFLINE)
Naoya Horiguchifeec24a2019-11-30 17:53:38 -08001507 soft_offline_page(entry.pfn, entry.flags);
Naveen N. Raocf870c72013-07-10 14:57:01 +05301508 else
Eric W. Biederman83b57532017-07-09 18:14:01 -05001509 memory_failure(entry.pfn, entry.flags);
Huang Yingea8f5fb2011-07-13 13:14:27 +08001510 }
1511}
1512
James Morse06202232020-05-01 17:45:41 +01001513/*
1514 * Process memory_failure work queued on the specified CPU.
1515 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
1516 */
1517void memory_failure_queue_kick(int cpu)
1518{
1519 struct memory_failure_cpu *mf_cpu;
1520
1521 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1522 cancel_work_sync(&mf_cpu->work);
1523 memory_failure_work_func(&mf_cpu->work);
1524}
1525
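/*
 * Usage sketch for memory_failure_queue_kick() (illustrative; in-tree the
 * APEI/GHES notification path does this via task_work): after queueing an
 * error from an exception handler, flush the queue on the same CPU before
 * the task returns to user space, so the SIGBUS is delivered instead of
 * the task re-executing the faulting access:
 *
 *	// earlier, from the error handler
 *	memory_failure_queue(pfn, flags);
 *	// later, from a task_work callback on the way back to user space
 *	memory_failure_queue_kick(smp_processor_id());
 */
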
Huang Yingea8f5fb2011-07-13 13:14:27 +08001526static int __init memory_failure_init(void)
1527{
1528 struct memory_failure_cpu *mf_cpu;
1529 int cpu;
1530
1531 for_each_possible_cpu(cpu) {
1532 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1533 spin_lock_init(&mf_cpu->lock);
1534 INIT_KFIFO(mf_cpu->fifo);
1535 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
1536 }
1537
1538 return 0;
1539}
1540core_initcall(memory_failure_init);
1541
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001542#define unpoison_pr_info(fmt, pfn, rs) \
1543({ \
1544 if (__ratelimit(rs)) \
1545 pr_info(fmt, pfn); \
1546})
1547
Wu Fengguang847ce402009-12-16 12:19:58 +01001548/**
1549 * unpoison_memory - Unpoison a previously poisoned page
1550 * @pfn: Page number of the page to be unpoisoned
1551 *
1552 * Software-unpoison a page that has been poisoned by
1553 * memory_failure() earlier.
1554 *
1555 * This is only done at the software level, so it only works
1556 * for Linux-injected failures, not real hardware failures.
1557 *
1558 * Returns 0 for success, otherwise -errno.
1559 */
1560int unpoison_memory(unsigned long pfn)
1561{
1562 struct page *page;
1563 struct page *p;
1564 int freeit = 0;
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001565 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1566 DEFAULT_RATELIMIT_BURST);
Wu Fengguang847ce402009-12-16 12:19:58 +01001567
1568 if (!pfn_valid(pfn))
1569 return -ENXIO;
1570
1571 p = pfn_to_page(pfn);
1572 page = compound_head(p);
1573
1574 if (!PageHWPoison(p)) {
Chen Yucong495367c02016-05-20 16:57:32 -07001575 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001576 pfn, &unpoison_rs);
Wu Fengguang847ce402009-12-16 12:19:58 +01001577 return 0;
1578 }
1579
Naoya Horiguchi230ac712015-09-08 15:03:29 -07001580 if (page_count(page) > 1) {
Chen Yucong495367c02016-05-20 16:57:32 -07001581 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001582 pfn, &unpoison_rs);
Naoya Horiguchi230ac712015-09-08 15:03:29 -07001583 return 0;
1584 }
1585
1586 if (page_mapped(page)) {
Chen Yucong495367c02016-05-20 16:57:32 -07001587 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001588 pfn, &unpoison_rs);
Naoya Horiguchi230ac712015-09-08 15:03:29 -07001589 return 0;
1590 }
1591
1592 if (page_mapping(page)) {
Chen Yucong495367c02016-05-20 16:57:32 -07001593 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001594 pfn, &unpoison_rs);
Naoya Horiguchi230ac712015-09-08 15:03:29 -07001595 return 0;
1596 }
1597
Wanpeng Li0cea3fd2013-09-11 14:22:53 -07001598 /*
1599	 * unpoison_memory() can encounter a thp only while the thp is being
1600	 * worked on by memory_failure() and the page lock is not yet held.
1601	 * In that case, we yield to memory_failure() and make unpoison fail.
1602 */
Wanpeng Lie76d30e2013-09-30 13:45:22 -07001603 if (!PageHuge(page) && PageTransHuge(page)) {
Chen Yucong495367c02016-05-20 16:57:32 -07001604 unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001605 pfn, &unpoison_rs);
Naoya Horiguchiead07f62015-06-24 16:56:48 -07001606 return 0;
Wanpeng Li0cea3fd2013-09-11 14:22:53 -07001607 }
1608
Naoya Horiguchiead07f62015-06-24 16:56:48 -07001609 if (!get_hwpoison_page(p)) {
Wu Fengguang847ce402009-12-16 12:19:58 +01001610 if (TestClearPageHWPoison(p))
Naoya Horiguchi8e304562015-09-08 15:03:24 -07001611 num_poisoned_pages_dec();
Chen Yucong495367c02016-05-20 16:57:32 -07001612 unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001613 pfn, &unpoison_rs);
Wu Fengguang847ce402009-12-16 12:19:58 +01001614 return 0;
1615 }
1616
Jens Axboe7eaceac2011-03-10 08:52:07 +01001617 lock_page(page);
Wu Fengguang847ce402009-12-16 12:19:58 +01001618 /*
1619	 * This test is racy because PG_hwpoison is set outside of the page lock.
1620	 * That's acceptable because it won't trigger a kernel panic. Instead,
1621	 * the PG_hwpoison page will be caught and isolated at the entrance to
1622 * the free buddy page pool.
1623 */
Naoya Horiguchic9fbdd52010-05-28 09:29:19 +09001624 if (TestClearPageHWPoison(page)) {
Chen Yucong495367c02016-05-20 16:57:32 -07001625 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
Naoya Horiguchia5f65102015-11-05 18:47:26 -08001626 pfn, &unpoison_rs);
Naoya Horiguchib37ff712017-07-10 15:47:38 -07001627 num_poisoned_pages_dec();
Wu Fengguang847ce402009-12-16 12:19:58 +01001628 freeit = 1;
1629 }
1630 unlock_page(page);
1631
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001632 put_page(page);
Wanpeng Li3ba5eeb2013-09-11 14:23:01 -07001633 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001634 put_page(page);
Wu Fengguang847ce402009-12-16 12:19:58 +01001635
1636 return 0;
1637}
1638EXPORT_SYMBOL(unpoison_memory);
Andi Kleenfacb6012009-12-16 12:20:00 +01001639
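/*
 * unpoison_memory() pairs with the software injector: after poisoning a
 * pfn for testing, it can be cleared again.  A typical debugging sequence
 * (requires CONFIG_HWPOISON_INJECT; the pfn value is made up):
 *
 *	echo 0x61234 > /sys/kernel/debug/hwpoison/corrupt-pfn
 *	... run the test ...
 *	echo 0x61234 > /sys/kernel/debug/hwpoison/unpoison-pfn
 */
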
Michal Hocko666feb22018-04-10 16:30:03 -07001640static struct page *new_page(struct page *p, unsigned long private)
Andi Kleenfacb6012009-12-16 12:20:00 +01001641{
Joonsoo Kim19fc7be2020-08-11 18:37:25 -07001642 struct migration_target_control mtc = {
1643 .nid = page_to_nid(p),
1644 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
1645 };
Anshuman Khandual94310cb2017-07-06 15:38:38 -07001646
Joonsoo Kim19fc7be2020-08-11 18:37:25 -07001647 return alloc_migration_target(p, (unsigned long)&mtc);
Andi Kleenfacb6012009-12-16 12:20:00 +01001648}
1649
1650/*
1651 * Safely get a reference on an arbitrary page.
1652 * Returns 0 for a free page, -EIO for a zero refcount page
1653 * that is not free, and 1 for any other page type.
1654 * When 1 is returned, the page's refcount has been increased; otherwise it has not.
1655 */
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001656static int __get_any_page(struct page *p, unsigned long pfn, int flags)
Andi Kleenfacb6012009-12-16 12:20:00 +01001657{
1658 int ret;
1659
1660 if (flags & MF_COUNT_INCREASED)
1661 return 1;
1662
1663 /*
Naoya Horiguchid950b952010-09-08 10:19:39 +09001664 * When the target page is a free hugepage, just remove it
1665	 * from the free hugepage list.
1666 */
Naoya Horiguchiead07f62015-06-24 16:56:48 -07001667 if (!get_hwpoison_page(p)) {
Naoya Horiguchid950b952010-09-08 10:19:39 +09001668 if (PageHuge(p)) {
Borislav Petkov71dd0b82012-05-29 15:06:16 -07001669 pr_info("%s: %#lx free huge page\n", __func__, pfn);
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001670 ret = 0;
Naoya Horiguchid950b952010-09-08 10:19:39 +09001671 } else if (is_free_buddy_page(p)) {
Borislav Petkov71dd0b82012-05-29 15:06:16 -07001672 pr_info("%s: %#lx free buddy page\n", __func__, pfn);
Andi Kleenfacb6012009-12-16 12:20:00 +01001673 ret = 0;
1674 } else {
Borislav Petkov71dd0b82012-05-29 15:06:16 -07001675 pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
1676 __func__, pfn, p->flags);
Andi Kleenfacb6012009-12-16 12:20:00 +01001677 ret = -EIO;
1678 }
1679 } else {
1680 /* Not a free page */
1681 ret = 1;
1682 }
Andi Kleenfacb6012009-12-16 12:20:00 +01001683 return ret;
1684}
1685
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001686static int get_any_page(struct page *page, unsigned long pfn, int flags)
1687{
1688 int ret = __get_any_page(page, pfn, flags);
1689
Yisheng Xie85fbe5d2017-02-24 14:57:35 -08001690 if (ret == 1 && !PageHuge(page) &&
1691 !PageLRU(page) && !__PageMovable(page)) {
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001692 /*
1693 * Try to free it.
1694 */
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001695 put_page(page);
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001696 shake_page(page, 1);
1697
1698 /*
1699 * Did it turn free?
1700 */
1701 ret = __get_any_page(page, pfn, 0);
Naoya Horiguchid96b3392016-01-15 16:54:03 -08001702 if (ret == 1 && !PageLRU(page)) {
Wanpeng Li4f32be62015-08-14 15:34:56 -07001703 /* Drop page reference which is from __get_any_page() */
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001704 put_page(page);
Anshuman Khandual82a24812017-05-03 14:55:31 -07001705 pr_info("soft_offline: %#lx: unknown non LRU page type %lx (%pGp)\n",
1706 pfn, page->flags, &page->flags);
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001707 return -EIO;
1708 }
1709 }
1710 return ret;
1711}
1712
Naoya Horiguchid950b952010-09-08 10:19:39 +09001713static int soft_offline_huge_page(struct page *page, int flags)
1714{
1715 int ret;
1716 unsigned long pfn = page_to_pfn(page);
1717 struct page *hpage = compound_head(page);
Naoya Horiguchib8ec1ce2013-09-11 14:22:01 -07001718 LIST_HEAD(pagelist);
Naoya Horiguchid950b952010-09-08 10:19:39 +09001719
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001720 /*
1721 * This double-check of PageHWPoison is to avoid the race with
1722 * memory_failure(). See also comment in __soft_offline_page().
1723 */
1724 lock_page(hpage);
Xishi Qiu0ebff322013-02-22 16:33:59 -08001725 if (PageHWPoison(hpage)) {
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001726 unlock_page(hpage);
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001727 put_page(hpage);
Xishi Qiu0ebff322013-02-22 16:33:59 -08001728 pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001729 return -EBUSY;
Xishi Qiu0ebff322013-02-22 16:33:59 -08001730 }
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001731 unlock_page(hpage);
Naoya Horiguchid950b952010-09-08 10:19:39 +09001732
Naoya Horiguchibcc54222015-04-15 16:14:38 -07001733 ret = isolate_huge_page(hpage, &pagelist);
Wanpeng Li03613802015-08-14 15:34:59 -07001734 /*
1735	 * get_any_page() and isolate_huge_page() each take a refcount,
1736	 * so we need to drop one here.
1737 */
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001738 put_page(hpage);
Wanpeng Li03613802015-08-14 15:34:59 -07001739 if (!ret) {
Naoya Horiguchibcc54222015-04-15 16:14:38 -07001740 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1741 return -EBUSY;
1742 }
1743
David Rientjes68711a72014-06-04 16:08:25 -07001744 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
Naoya Horiguchib8ec1ce2013-09-11 14:22:01 -07001745 MIGRATE_SYNC, MR_MEMORY_FAILURE);
Naoya Horiguchid950b952010-09-08 10:19:39 +09001746 if (ret) {
Laszlo Tothb6b18aa2017-11-15 17:37:00 -08001747 pr_info("soft offline: %#lx: hugepage migration failed %d, type %lx (%pGp)\n",
Anshuman Khandual82a24812017-05-03 14:55:31 -07001748 pfn, ret, page->flags, &page->flags);
Punit Agrawal30809f52017-06-02 14:46:40 -07001749 if (!list_empty(&pagelist))
1750 putback_movable_pages(&pagelist);
Naoya Horiguchib8ec1ce2013-09-11 14:22:01 -07001751 if (ret > 0)
1752 ret = -EIO;
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001753 } else {
Naoya Horiguchi6bc9b562018-08-23 17:00:38 -07001754 /*
1755 * We set PG_hwpoison only when the migration source hugepage
1756	 * was successfully dissolved, because otherwise the hwpoisoned
1757	 * hugepage would remain on the free hugepage list, and userspace
1758	 * would later get a SIGBUS when that page is allocated. That's not
1759	 * expected in soft offlining.
1760 */
1761 ret = dissolve_free_huge_page(page);
1762 if (!ret) {
1763 if (set_hwpoison_free_buddy_page(page))
1764 num_poisoned_pages_inc();
Naoya Horiguchib38e5962019-06-28 12:06:53 -07001765 else
1766 ret = -EBUSY;
Naoya Horiguchi6bc9b562018-08-23 17:00:38 -07001767 }
Naoya Horiguchid950b952010-09-08 10:19:39 +09001768 }
Naoya Horiguchid950b952010-09-08 10:19:39 +09001769 return ret;
1770}
1771
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001772static int __soft_offline_page(struct page *page, int flags)
1773{
1774 int ret;
1775 unsigned long pfn = page_to_pfn(page);
Andi Kleenfacb6012009-12-16 12:20:00 +01001776
1777 /*
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001778 * Check PageHWPoison again inside page lock because PageHWPoison
1779 * is set by memory_failure() outside page lock. Note that
1780 * memory_failure() also double-checks PageHWPoison inside page lock,
1781 * so there's no race between soft_offline_page() and memory_failure().
Andi Kleenfacb6012009-12-16 12:20:00 +01001782 */
Xishi Qiu0ebff322013-02-22 16:33:59 -08001783 lock_page(page);
1784 wait_on_page_writeback(page);
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001785 if (PageHWPoison(page)) {
1786 unlock_page(page);
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001787 put_page(page);
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001788 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1789 return -EBUSY;
1790 }
Andi Kleenfacb6012009-12-16 12:20:00 +01001791 /*
1792 * Try to invalidate first. This should work for
1793	 * clean, unmapped page cache pages.
1794 */
1795 ret = invalidate_inode_page(page);
1796 unlock_page(page);
Andi Kleenfacb6012009-12-16 12:20:00 +01001797 /*
Andi Kleenfacb6012009-12-16 12:20:00 +01001798	 * RED-PEN: it would be better to keep the page isolated here, but we
1799 * would need to fix isolation locking first.
1800 */
Andi Kleenfacb6012009-12-16 12:20:00 +01001801 if (ret == 1) {
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001802 put_page(page);
Andi Kleenfb46e732010-09-27 23:31:30 +02001803 pr_info("soft_offline: %#lx: invalidated\n", pfn);
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001804 SetPageHWPoison(page);
Naoya Horiguchi8e304562015-09-08 15:03:24 -07001805 num_poisoned_pages_inc();
Naoya Horiguchiaf8fae72013-02-22 16:34:03 -08001806 return 0;
Andi Kleenfacb6012009-12-16 12:20:00 +01001807 }
1808
1809 /*
1810 * Simple invalidation didn't work.
1811 * Try to migrate to a new page instead. migrate.c
1812 * handles a large number of cases for us.
1813 */
Yisheng Xie85fbe5d2017-02-24 14:57:35 -08001814 if (PageLRU(page))
1815 ret = isolate_lru_page(page);
1816 else
1817 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
Konstantin Khlebnikovbd486282011-05-24 17:12:20 -07001818 /*
1819	 * Drop the page reference that came from get_any_page();
1820	 * a successful isolate_lru_page() already took another one.
1821 */
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001822 put_page(page);
Andi Kleenfacb6012009-12-16 12:20:00 +01001823 if (!ret) {
1824 LIST_HEAD(pagelist);
Yisheng Xie85fbe5d2017-02-24 14:57:35 -08001825 /*
1826	 * After isolating an LRU page, PageLRU will be cleared,
1827	 * so check !__PageMovable() instead; an LRU page's mapping
1828	 * cannot have PAGE_MAPPING_MOVABLE set.
1829 */
1830 if (!__PageMovable(page))
1831 inc_node_page_state(page, NR_ISOLATED_ANON +
Huang Ying9de4f222020-04-06 20:04:41 -07001832 page_is_file_lru(page));
Andi Kleenfacb6012009-12-16 12:20:00 +01001833 list_add(&page->lru, &pagelist);
David Rientjes68711a72014-06-04 16:08:25 -07001834 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
Hugh Dickins9c620e22013-02-22 16:35:14 -08001835 MIGRATE_SYNC, MR_MEMORY_FAILURE);
Andi Kleenfacb6012009-12-16 12:20:00 +01001836 if (ret) {
Yisheng Xie85fbe5d2017-02-24 14:57:35 -08001837 if (!list_empty(&pagelist))
1838 putback_movable_pages(&pagelist);
Joonsoo Kim59c82b72014-01-21 15:51:17 -08001839
Anshuman Khandual82a24812017-05-03 14:55:31 -07001840 pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
1841 pfn, ret, page->flags, &page->flags);
Andi Kleenfacb6012009-12-16 12:20:00 +01001842 if (ret > 0)
1843 ret = -EIO;
1844 }
1845 } else {
Anshuman Khandual82a24812017-05-03 14:55:31 -07001846 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx (%pGp)\n",
1847 pfn, ret, page_count(page), page->flags, &page->flags);
Andi Kleenfacb6012009-12-16 12:20:00 +01001848 }
Andi Kleenfacb6012009-12-16 12:20:00 +01001849 return ret;
1850}
Wanpeng Li86e05772013-09-11 14:22:56 -07001851
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001852static int soft_offline_in_use_page(struct page *page, int flags)
1853{
1854 int ret;
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07001855 int mt;
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001856 struct page *hpage = compound_head(page);
1857
1858 if (!PageHuge(page) && PageTransHuge(hpage)) {
zhongjiang46612b72019-03-05 15:41:16 -08001859 lock_page(page);
1860 if (!PageAnon(page) || unlikely(split_huge_page(page))) {
1861 unlock_page(page);
1862 if (!PageAnon(page))
Naoya Horiguchi98fd1ef2016-01-15 16:57:46 -08001863 pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
1864 else
1865 pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001866 put_page(page);
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001867 return -EBUSY;
1868 }
zhongjiang46612b72019-03-05 15:41:16 -08001869 unlock_page(page);
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001870 }
1871
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07001872 /*
1873 * Setting MIGRATE_ISOLATE here ensures that the page will be linked
1874	 * to the free list immediately (not via the pcplist) when released after
1875 * successful page migration. Otherwise we can't guarantee that the
1876 * page is really free after put_page() returns, so
1877	 * set_hwpoison_free_buddy_page() would very likely fail.
1878 */
1879 mt = get_pageblock_migratetype(page);
1880 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001881 if (PageHuge(page))
1882 ret = soft_offline_huge_page(page, flags);
1883 else
1884 ret = __soft_offline_page(page, flags);
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07001885 set_pageblock_migratetype(page, mt);
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001886 return ret;
1887}
1888
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07001889static int soft_offline_free_page(struct page *page)
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001890{
Naoya Horiguchifaf53de2019-06-28 12:06:56 -07001891 int rc = dissolve_free_huge_page(page);
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001892
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07001893 if (!rc) {
1894 if (set_hwpoison_free_buddy_page(page))
1895 num_poisoned_pages_inc();
1896 else
1897 rc = -EBUSY;
1898 }
1899 return rc;
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001900}
1901
Wanpeng Li86e05772013-09-11 14:22:56 -07001902/**
1903 * soft_offline_page - Soft offline a page.
Naoya Horiguchifeec24a2019-11-30 17:53:38 -08001904 * @pfn: pfn to soft-offline
Wanpeng Li86e05772013-09-11 14:22:56 -07001905 * @flags: flags. Same as memory_failure().
1906 *
1907 * Returns 0 on success, otherwise negated errno.
1908 *
1909 * Soft offline a page, by migration or invalidation,
1910 * without killing anything. This is for the case when
1911 * a page is not corrupted yet (so it's still valid to access),
1912 * but has had a number of corrected errors and is better taken
1913 * out.
1914 *
1915 * The actual policy on when to do that is maintained by
1916 * user space.
1917 *
1918 * This should never impact any application or cause data loss,
1919 * however it might take some time.
1920 *
1921 * This is not a 100% solution for all memory, but tries to be
1922 * ``good enough'' for the majority of memory.
1923 */
Naoya Horiguchifeec24a2019-11-30 17:53:38 -08001924int soft_offline_page(unsigned long pfn, int flags)
Wanpeng Li86e05772013-09-11 14:22:56 -07001925{
1926 int ret;
Naoya Horiguchifeec24a2019-11-30 17:53:38 -08001927 struct page *page;
Wanpeng Li86e05772013-09-11 14:22:56 -07001928
Naoya Horiguchifeec24a2019-11-30 17:53:38 -08001929 if (!pfn_valid(pfn))
1930 return -ENXIO;
1931 /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
1932 page = pfn_to_online_page(pfn);
1933 if (!page)
Dan Williams86a66812018-07-13 21:49:56 -07001934 return -EIO;
Dan Williams86a66812018-07-13 21:49:56 -07001935
Wanpeng Li86e05772013-09-11 14:22:56 -07001936 if (PageHWPoison(page)) {
1937 pr_info("soft offline: %#lx page already poisoned\n", pfn);
Wanpeng Li1e0e6352015-09-08 15:03:13 -07001938 if (flags & MF_COUNT_INCREASED)
Oscar Salvadordd6e2402020-10-15 20:06:57 -07001939 put_page(page);
Wanpeng Li86e05772013-09-11 14:22:56 -07001940 return -EBUSY;
1941 }
Wanpeng Li86e05772013-09-11 14:22:56 -07001942
Vladimir Davydovbfc8c902014-06-04 16:07:18 -07001943 get_online_mems();
Wanpeng Li86e05772013-09-11 14:22:56 -07001944 ret = get_any_page(page, pfn, flags);
Vladimir Davydovbfc8c902014-06-04 16:07:18 -07001945 put_online_mems();
Naoya Horiguchi4e41a302016-01-15 16:54:07 -08001946
Naoya Horiguchiacc14dc2016-01-15 16:57:43 -08001947 if (ret > 0)
1948 ret = soft_offline_in_use_page(page, flags);
1949 else if (ret == 0)
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07001950 ret = soft_offline_free_page(page);
Naoya Horiguchi4e41a302016-01-15 16:54:07 -08001951
Wanpeng Li86e05772013-09-11 14:22:56 -07001952 return ret;
1953}
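
/*
 * Soft offline is normally driven by user space policy, e.g. a RAS daemon
 * reacting to corrected error counts.  A minimal sketch using the sysfs
 * interface provided by the memory hotplug code (the address is made up;
 * the file takes a physical address, not a pfn):
 *
 *	echo 0x61234000 > /sys/devices/system/memory/soft_offline_page
 *
 * which reaches soft_offline_page() above with flags == 0, so the page is
 * migrated or invalidated and then kept out of the allocator by
 * PG_hwpoison.
 */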