// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 * Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 * Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 * (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif
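/*
 * Note on usage: randomize_va_space is also exposed at run time via the
 * /proc/sys/kernel/randomize_va_space sysctl (0 = off, 1 = randomize
 * stack/mmap/vdso placement, 2 = additionally randomize the heap (brk)
 * base), and the "norandmaps" boot parameter handled below forces it to 0.
 */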

#ifndef arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	/*
	 * Those architectures which don't have a hardware access-flag
	 * feature need to implement their own helper. By default, "true"
	 * means a page fault will be taken on an old pte.
	 */
	return true;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);
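/*
 * Caching the PFN of the shared zero page above lets helpers such as
 * is_zero_pfn()/my_zero_pfn() (used, for example, by vm_normal_page()
 * below) recognize zero-page mappings by a simple pfn comparison.
 */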

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
{
	trace_rss_stat(mm, member, count);
}

#if defined(SPLIT_RSS_COUNTING)

void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */
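/*
 * Illustrative use of the helpers above: when a task faults in an
 * anonymous page of its own mm, the fault path in this file does, e.g.,
 *
 *	inc_mm_counter_fast(mm, MM_ANONPAGES);
 *
 * which, with SPLIT_RSS_COUNTING, only bumps the per-task
 * current->rss_stat.count[MM_ANONPAGES]; the shared mm counters are
 * folded in later by check_sync_rss_stat() -> sync_mm_rss() once
 * TASK_RSS_EVENTS_THRESH events have accumulated.
 */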

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level? Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s? Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE
	 * (see pte_free_tlb()), so flush the TLB if we need to.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_rmb() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->readpage : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
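/*
 * Worked example of the VM_PFNMAP linearity rule above (illustrative
 * numbers only, assuming 4K pages): for a COWable VM_PFNMAP vma with
 * vm_start == 0x400000 and vm_pgoff == 0x100, the pte mapping address
 * 0x401000 is "special" iff its pfn is 0x100 + 1 == 0x101. A page
 * installed there later by a COW fault has some other pfn, fails that
 * test, and is therefore treated as a normal, refcounted page.
 */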

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/*
	 * There is no pmd_special() but there may be special pmds, e.g.
	 * in a direct-access (dax) mapping, so let's just replicate the
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
	 */
	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
#endif

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return entry.val;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		page = migration_entry_to_page(entry);

		rss[mm_counter(page)]++;

		if (is_write_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both
			 * parent and child to be set to read.
			 */
			make_migration_entry_read(&entry);
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*src_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = device_private_entry_to_page(entry);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		get_page(page);
		rss[mm_counter(page)]++;
		page_dup_rmap(page, false);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_write_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			make_device_private_entry_read(&entry);
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	}
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page if necessary.
 *
 * NOTE! The usual case is that this doesn't need to do
 * anything, and can just return a positive value. That
 * will let the caller know that it can just increase
 * the page refcount and re-use the pte the traditional
 * way.
 *
 * But _if_ we need to copy it because it needs to be
 * pinned in the parent (and the child should get its own
 * copy rather than just a reference to the same page),
 * we'll do that here and return zero to let the caller
 * know we're done.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct page **prealloc, pte_t pte, struct page *page)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct page *new_page;

	if (!is_cow_mapping(src_vma->vm_flags))
		return 1;

	/*
	 * What we want to do is to check whether this page may
	 * have been pinned by the parent process. If so,
	 * instead of wrprotect the pte on both sides, we copy
	 * the page immediately so that we'll always guarantee
	 * the pinned page won't be randomly replaced in the
	 * future.
	 *
	 * The page pinning checks are just "has this mm ever
	 * seen pinning", along with the (inexact) check of
	 * the page count. That might give false positives for
	 * pinning, but it will work correctly.
	 */
	if (likely(!atomic_read(&src_mm->has_pinned)))
		return 1;
	if (likely(!page_maybe_dma_pinned(page)))
		return 1;

	new_page = *prealloc;
	if (!new_page)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good!  Take it
	 * over and copy the page & arm it.
	 */
	*prealloc = NULL;
	copy_user_highpage(new_page, page, addr, src_vma);
	__SetPageUptodate(new_page);
	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
	rss[mm_counter(new_page)]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(new_page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 * is required to copy this pte.
 */
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		 struct page **prealloc)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	unsigned long vm_flags = src_vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	page = vm_normal_page(src_vma, addr, pte);
	if (page) {
		int retval;

		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
					   addr, rss, prealloc, pte, page);
		if (retval <= 0)
			return retval;

		get_page(page);
		page_dup_rmap(page, false);
		rss[mm_counter(page)]++;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	/*
	 * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
	 * does not have VM_UFFD_WP set, which means that the uffd
	 * fork event is not enabled.
	 */
	if (!(vm_flags & VM_UFFD_WP))
		pte = pte_clear_uffd_wp(pte);

	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

static inline struct page *
page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
		   unsigned long addr)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
	if (!new_page)
		return NULL;

	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
		put_page(new_page);
		return NULL;
	}
	cgroup_throttle_swaprate(new_page, GFP_KERNEL);

	return new_page;
}

static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct page *prealloc = NULL;

again:
	progress = 0;
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}
	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(*src_pte))) {
			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
							dst_pte, src_pte,
							src_vma, addr, rss);
			if (entry.val)
				break;
			progress += 8;
			continue;
		}
		/* copy_present_pte() will clear `*prealloc' if consumed */
		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
				       addr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 */
		if (unlikely(ret == -EAGAIN))
			break;
		if (unlikely(prealloc)) {
			/*
			 * The preallocated page cannot be reused for the next
			 * address, so as to strictly follow mempolicy (e.g.,
			 * alloc_page_vma() allocates the page according to the
			 * address). This can only happen if a pinned pte changed.
			 */
			put_page(prealloc);
			prealloc = NULL;
		}
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (entry.val) {
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret) {
		WARN_ON_ONCE(ret != -EAGAIN);
		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
		if (!prealloc)
			return -ENOMEM;
		/* We've captured and resolved the error. Reset, try again. */
		ret = 0;
	}
	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		put_page(prealloc);
	return ret;
}

static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm,
					    dst_pmd, src_pmd, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	bool is_cow;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
	    !src_vma->anon_vma)
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_copy(src_vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_vma, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow)
		mmu_notifier_invalidate_range_end(&range);
	return ret;
}
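/*
 * copy_page_range() above is the fork()-time entry point: dup_mmap() in
 * kernel/fork.c calls it for each vma being duplicated into the child,
 * after the child's vma copy and page-table skeleton have been set up.
 */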

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	int force_flush = 0;
	int rss[NR_MM_COUNTERS];
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;
	swp_entry_t entry;

	tlb_change_page_size(tlb, PAGE_SIZE);
again:
	init_rss_vec(rss);
	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = start_pte;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent))
			continue;

		if (need_resched())
			break;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page_rmapping(page))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;

			if (!PageAnon(page)) {
				if (pte_dirty(ptent)) {
					force_flush = 1;
					set_page_dirty(page);
				}
				if (pte_young(ptent) &&
				    likely(!(vma->vm_flags & VM_SEQ_READ)))
					mark_page_accessed(page);
			}
			rss[mm_counter(page)]--;
			page_remove_rmap(page, false);
			if (unlikely(page_mapcount(page) < 0))
				print_bad_pte(vma, addr, ptent, page);
			if (unlikely(__tlb_remove_page(tlb, page))) {
				force_flush = 1;
				addr += PAGE_SIZE;
				break;
			}
			continue;
		}

		entry = pte_to_swp_entry(ptent);
		if (is_device_private_entry(entry)) {
			struct page *page = device_private_entry_to_page(entry);

			if (unlikely(details && details->check_mapping)) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping !=
				    page_rmapping(page))
					continue;
			}

			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			rss[mm_counter(page)]--;
			page_remove_rmap(page, false);
			put_page(page);
			continue;
		}

		/* If details->check_mapping, we leave swap entries. */
		if (unlikely(details))
			continue;

		if (!non_swap_entry(entry))
			rss[MM_SWAPENTS]--;
		else if (is_migration_entry(entry)) {
			struct page *page;

			page = migration_entry_to_page(entry);
			rss[mm_counter(page)]--;
		}
		if (unlikely(!free_swap_and_cache(entry)))
1299 print_bad_pte(vma, addr, ptent, NULL);
Zachary Amsden9888a1c2006-09-30 23:29:31 -07001300 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001301 } while (pte++, addr += PAGE_SIZE, addr != end);
Hugh Dickinsae859762005-10-29 18:16:05 -07001302
KAMEZAWA Hiroyukid559db02010-03-05 13:41:39 -08001303 add_mm_rss_vec(mm, rss);
Zachary Amsden6606c3e2006-09-30 23:29:33 -07001304 arch_leave_lazy_mmu_mode();
Robin Holt51c6f662005-11-13 16:06:42 -08001305
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001306 /* Do the actual TLB flush before dropping ptl */
Will Deaconfb7332a2014-10-29 10:03:09 +00001307 if (force_flush)
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001308 tlb_flush_mmu_tlbonly(tlb);
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001309 pte_unmap_unlock(start_pte, ptl);
1310
1311 /*
1312 * If we forced a TLB flush (either due to running out of
1313 * batch buffers or because we needed to flush dirty TLB
1314 * entries before releasing the ptl), free the batched
1315 * memory too. Restart if we didn't do everything.
1316 */
1317 if (force_flush) {
1318 force_flush = 0;
Peter Zijlstrafa0aafb2018-09-20 10:54:04 +02001319 tlb_flush_mmu(tlb);
Minchan Kim7b167b62019-09-24 00:02:24 +00001320 }
1321
1322 if (addr != end) {
1323 cond_resched();
1324 goto again;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001325 }
1326
Robin Holt51c6f662005-11-13 16:06:42 -08001327 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328}
1329
Robin Holt51c6f662005-11-13 16:06:42 -08001330static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
Nick Pigginb5810032005-10-29 18:16:12 -07001331 struct vm_area_struct *vma, pud_t *pud,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 unsigned long addr, unsigned long end,
Peter Zijlstra97a89412011-05-24 17:12:04 -07001333 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334{
1335 pmd_t *pmd;
1336 unsigned long next;
1337
1338 pmd = pmd_offset(pud, addr);
1339 do {
1340 next = pmd_addr_end(addr, end);
Zi Yan84c3fc42017-09-08 16:11:01 -07001341 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
Hugh Dickins53406ed2018-08-01 11:31:52 -07001342 if (next - addr != HPAGE_PMD_SIZE)
David Rientjesfd607752016-12-12 16:42:20 -08001343 __split_huge_pmd(vma, pmd, addr, false, NULL);
Hugh Dickins53406ed2018-08-01 11:31:52 -07001344 else if (zap_huge_pmd(tlb, vma, pmd, addr))
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001345 goto next;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001346 /* fall through */
1347 }
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001348 /*
1349 * Here there can be other concurrent MADV_DONTNEED or
1350 * trans huge page faults running, and if the pmd is
1351 * none or trans huge it can change under us. This is
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001352 * because MADV_DONTNEED holds the mmap_lock in read
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001353 * mode.
1354 */
1355 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1356 goto next;
Peter Zijlstra97a89412011-05-24 17:12:04 -07001357 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001358next:
Peter Zijlstra97a89412011-05-24 17:12:04 -07001359 cond_resched();
1360 } while (pmd++, addr = next, addr != end);
Robin Holt51c6f662005-11-13 16:06:42 -08001361
1362 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363}
1364
Robin Holt51c6f662005-11-13 16:06:42 -08001365static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001366 struct vm_area_struct *vma, p4d_t *p4d,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 unsigned long addr, unsigned long end,
Peter Zijlstra97a89412011-05-24 17:12:04 -07001368 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369{
1370 pud_t *pud;
1371 unsigned long next;
1372
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001373 pud = pud_offset(p4d, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 do {
1375 next = pud_addr_end(addr, end);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001376 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1377 if (next - addr != HPAGE_PUD_SIZE) {
Michel Lespinasse42fc5412020-06-08 21:33:44 -07001378 mmap_assert_locked(tlb->mm);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001379 split_huge_pud(vma, pud, addr);
1380 } else if (zap_huge_pud(tlb, vma, pud, addr))
1381 goto next;
1382 /* fall through */
1383 }
Peter Zijlstra97a89412011-05-24 17:12:04 -07001384 if (pud_none_or_clear_bad(pud))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 continue;
Peter Zijlstra97a89412011-05-24 17:12:04 -07001386 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001387next:
1388 cond_resched();
Peter Zijlstra97a89412011-05-24 17:12:04 -07001389 } while (pud++, addr = next, addr != end);
Robin Holt51c6f662005-11-13 16:06:42 -08001390
1391 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392}
1393
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001394static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1395 struct vm_area_struct *vma, pgd_t *pgd,
1396 unsigned long addr, unsigned long end,
1397 struct zap_details *details)
1398{
1399 p4d_t *p4d;
1400 unsigned long next;
1401
1402 p4d = p4d_offset(pgd, addr);
1403 do {
1404 next = p4d_addr_end(addr, end);
1405 if (p4d_none_or_clear_bad(p4d))
1406 continue;
1407 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1408 } while (p4d++, addr = next, addr != end);
1409
1410 return addr;
1411}
1412
Michal Hockoaac45362016-03-25 14:20:24 -07001413void unmap_page_range(struct mmu_gather *tlb,
Al Viro038c7aa2012-03-05 13:25:09 -05001414 struct vm_area_struct *vma,
1415 unsigned long addr, unsigned long end,
1416 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417{
1418 pgd_t *pgd;
1419 unsigned long next;
1420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 BUG_ON(addr >= end);
1422 tlb_start_vma(tlb, vma);
1423 pgd = pgd_offset(vma->vm_mm, addr);
1424 do {
1425 next = pgd_addr_end(addr, end);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001426 if (pgd_none_or_clear_bad(pgd))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 continue;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001428 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001429 } while (pgd++, addr = next, addr != end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 tlb_end_vma(tlb, vma);
1431}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
Al Virof5cc4ee2012-03-05 14:14:20 -05001433
1434static void unmap_single_vma(struct mmu_gather *tlb,
1435 struct vm_area_struct *vma, unsigned long start_addr,
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001436 unsigned long end_addr,
Al Virof5cc4ee2012-03-05 14:14:20 -05001437 struct zap_details *details)
1438{
1439 unsigned long start = max(vma->vm_start, start_addr);
1440 unsigned long end;
1441
1442 if (start >= vma->vm_end)
1443 return;
1444 end = min(vma->vm_end, end_addr);
1445 if (end <= vma->vm_start)
1446 return;
1447
Srikar Dronamrajucbc91f72012-04-11 16:05:27 +05301448 if (vma->vm_file)
1449 uprobe_munmap(vma, start, end);
1450
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07001451 if (unlikely(vma->vm_flags & VM_PFNMAP))
Suresh Siddha5180da42012-10-08 16:28:29 -07001452 untrack_pfn(vma, 0, 0);
Al Virof5cc4ee2012-03-05 14:14:20 -05001453
1454 if (start != end) {
1455 if (unlikely(is_vm_hugetlb_page(vma))) {
1456 /*
1457 * It is undesirable to test vma->vm_file as it
1458 * should be non-null for valid hugetlb area.
1459 * However, vm_file will be NULL in the error
Davidlohr Bueso7aa6b4a2014-04-07 15:37:01 -07001460 * cleanup path of mmap_region. When
Al Virof5cc4ee2012-03-05 14:14:20 -05001461 * hugetlbfs ->mmap method fails,
Davidlohr Bueso7aa6b4a2014-04-07 15:37:01 -07001462 * mmap_region() nullifies vma->vm_file
Al Virof5cc4ee2012-03-05 14:14:20 -05001463 * before calling this function to clean up.
 1464			 * Since no pte has actually been set up, it is
1465 * safe to do nothing in this case.
1466 */
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07001467 if (vma->vm_file) {
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08001468 i_mmap_lock_write(vma->vm_file->f_mapping);
Mel Gormand8333522012-07-31 16:46:20 -07001469 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08001470 i_mmap_unlock_write(vma->vm_file->f_mapping);
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07001471 }
Al Virof5cc4ee2012-03-05 14:14:20 -05001472 } else
1473 unmap_page_range(tlb, vma, start, end, details);
1474 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475}
1476
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477/**
1478 * unmap_vmas - unmap a range of memory covered by a list of vma's
Randy Dunlap0164f692011-06-15 15:08:09 -07001479 * @tlb: address of the caller's struct mmu_gather
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 * @vma: the starting vma
1481 * @start_addr: virtual address at which to start unmapping
1482 * @end_addr: virtual address at which to end unmapping
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 *
Hugh Dickins508034a2005-10-29 18:16:30 -07001484 * Unmap all pages in the vma list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 * Only addresses between @start_addr and @end_addr will be unmapped.
1487 *
1488 * The VMA list must be sorted in ascending virtual address order.
1489 *
1490 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1491 * range after unmap_vmas() returns. So the only responsibility here is to
1492 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1493 * drops the lock and schedules.
1494 */
Al Viro6e8bb012012-03-05 13:41:15 -05001495void unmap_vmas(struct mmu_gather *tlb,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 struct vm_area_struct *vma, unsigned long start_addr,
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001497 unsigned long end_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001499 struct mmu_notifier_range range;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001501 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1502 start_addr, end_addr);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001503 mmu_notifier_invalidate_range_start(&range);
Al Virof5cc4ee2012-03-05 14:14:20 -05001504 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001505 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001506 mmu_notifier_invalidate_range_end(&range);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507}
1508
1509/**
1510 * zap_page_range - remove user pages in a given range
1511 * @vma: vm_area_struct holding the applicable pages
Randy Dunlapeb4546b2012-06-20 12:53:02 -07001512 * @start: starting address of pages to zap
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 * @size: number of bytes to zap
Al Virof5cc4ee2012-03-05 14:14:20 -05001514 *
1515 * Caller must protect the VMA list
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 */
Linus Torvalds7e027b12012-05-06 13:43:15 -07001517void zap_page_range(struct vm_area_struct *vma, unsigned long start,
Kirill A. Shutemovecf13852017-02-22 15:46:37 -08001518 unsigned long size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001520 struct mmu_notifier_range range;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001521 struct mmu_gather tlb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 lru_add_drain();
Jérôme Glisse7269f992019-05-13 17:20:53 -07001524 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001525 start, start + size);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001526 tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1527 update_hiwater_rss(vma->vm_mm);
1528 mmu_notifier_invalidate_range_start(&range);
1529 for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1530 unmap_single_vma(&tlb, vma, start, range.end, NULL);
1531 mmu_notifier_invalidate_range_end(&range);
1532 tlb_finish_mmu(&tlb, start, range.end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533}
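
/*
 * Illustrative sketch, not part of this file: how an MADV_DONTNEED-style
 * caller could drop the PTEs of a range starting in one vma.  The caller is
 * assumed to hold mmap_lock, as madvise() does; "my_" names are
 * hypothetical.
 */
static void my_discard_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long len)
{
	/* zap_page_range() walks from @vma over any following vmas in range. */
	zap_page_range(vma, start, len);
}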
1534
Jack Steinerc627f9c2008-07-29 22:33:53 -07001535/**
Al Virof5cc4ee2012-03-05 14:14:20 -05001536 * zap_page_range_single - remove user pages in a given range
1537 * @vma: vm_area_struct holding the applicable pages
1538 * @address: starting address of pages to zap
1539 * @size: number of bytes to zap
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001540 * @details: details of shared cache invalidation
Al Virof5cc4ee2012-03-05 14:14:20 -05001541 *
1542 * The range must fit into one VMA.
1543 */
1544static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1545 unsigned long size, struct zap_details *details)
1546{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001547 struct mmu_notifier_range range;
Al Virof5cc4ee2012-03-05 14:14:20 -05001548 struct mmu_gather tlb;
Al Virof5cc4ee2012-03-05 14:14:20 -05001549
1550 lru_add_drain();
Jérôme Glisse7269f992019-05-13 17:20:53 -07001551 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001552 address, address + size);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001553 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1554 update_hiwater_rss(vma->vm_mm);
1555 mmu_notifier_invalidate_range_start(&range);
1556 unmap_single_vma(&tlb, vma, address, range.end, details);
1557 mmu_notifier_invalidate_range_end(&range);
1558 tlb_finish_mmu(&tlb, address, range.end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559}
1560
Jack Steinerc627f9c2008-07-29 22:33:53 -07001561/**
1562 * zap_vma_ptes - remove ptes mapping the vma
1563 * @vma: vm_area_struct holding ptes to be zapped
1564 * @address: starting address of pages to zap
1565 * @size: number of bytes to zap
1566 *
1567 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1568 *
1569 * The entire address range must be fully contained within the vma.
1570 *
Jack Steinerc627f9c2008-07-29 22:33:53 -07001571 */
Leon Romanovsky27d036e2018-05-29 15:14:07 +03001572void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
Jack Steinerc627f9c2008-07-29 22:33:53 -07001573 unsigned long size)
1574{
1575 if (address < vma->vm_start || address + size > vma->vm_end ||
1576 !(vma->vm_flags & VM_PFNMAP))
Leon Romanovsky27d036e2018-05-29 15:14:07 +03001577 return;
1578
Al Virof5cc4ee2012-03-05 14:14:20 -05001579 zap_page_range_single(vma, address, size, NULL);
Jack Steinerc627f9c2008-07-29 22:33:53 -07001580}
1581EXPORT_SYMBOL_GPL(zap_vma_ptes);
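
/*
 * Illustrative sketch, not part of this file: a driver that established PFN
 * mappings in a VM_PFNMAP vma (e.g. via vmf_insert_pfn()) can revoke them
 * with zap_vma_ptes() before the backing resource goes away.  "my_buf" and
 * its fields are hypothetical.
 */
struct my_buf {
	unsigned long size;	/* bytes mapped, page aligned */
};

static void my_buf_revoke_mapping(struct vm_area_struct *vma,
				  struct my_buf *buf)
{
	/* The whole range must be contained within the vma, see above. */
	zap_vma_ptes(vma, vma->vm_start, buf->size);
}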
1582
Arjun Roy8cd39842020-04-10 14:33:01 -07001583static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001584{
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001585 pgd_t *pgd;
1586 p4d_t *p4d;
1587 pud_t *pud;
1588 pmd_t *pmd;
1589
1590 pgd = pgd_offset(mm, addr);
1591 p4d = p4d_alloc(mm, pgd, addr);
1592 if (!p4d)
1593 return NULL;
1594 pud = pud_alloc(mm, p4d, addr);
1595 if (!pud)
1596 return NULL;
1597 pmd = pmd_alloc(mm, pud, addr);
1598 if (!pmd)
1599 return NULL;
1600
1601 VM_BUG_ON(pmd_trans_huge(*pmd));
Arjun Roy8cd39842020-04-10 14:33:01 -07001602 return pmd;
1603}
1604
1605pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1606 spinlock_t **ptl)
1607{
1608 pmd_t *pmd = walk_to_pmd(mm, addr);
1609
1610 if (!pmd)
1611 return NULL;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001612 return pte_alloc_map_lock(mm, pmd, addr, ptl);
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001613}
1614
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001615static int validate_page_before_insert(struct page *page)
1616{
1617 if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1618 return -EINVAL;
1619 flush_dcache_page(page);
1620 return 0;
1621}
1622
1623static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1624 unsigned long addr, struct page *page, pgprot_t prot)
1625{
1626 if (!pte_none(*pte))
1627 return -EBUSY;
1628 /* Ok, finally just insert the thing.. */
1629 get_page(page);
1630 inc_mm_counter_fast(mm, mm_counter_file(page));
1631 page_add_file_rmap(page, false);
1632 set_pte_at(mm, addr, pte, mk_pte(page, prot));
1633 return 0;
1634}
1635
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636/*
Linus Torvalds238f58d2005-11-29 13:01:56 -08001637 * This is the old fallback for page remapping.
1638 *
1639 * For historical reasons, it only allows reserved pages. Only
1640 * old drivers should use this, and they needed to mark their
1641 * pages reserved for the old functions anyway.
1642 */
Nick Piggin423bad602008-04-28 02:13:01 -07001643static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1644 struct page *page, pgprot_t prot)
Linus Torvalds238f58d2005-11-29 13:01:56 -08001645{
Nick Piggin423bad602008-04-28 02:13:01 -07001646 struct mm_struct *mm = vma->vm_mm;
Linus Torvalds238f58d2005-11-29 13:01:56 -08001647 int retval;
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001648 pte_t *pte;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001649 spinlock_t *ptl;
1650
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001651 retval = validate_page_before_insert(page);
1652 if (retval)
KAMEZAWA Hiroyuki5b4e6552008-10-18 20:28:10 -07001653 goto out;
Linus Torvalds238f58d2005-11-29 13:01:56 -08001654 retval = -ENOMEM;
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001655 pte = get_locked_pte(mm, addr, &ptl);
Linus Torvalds238f58d2005-11-29 13:01:56 -08001656 if (!pte)
KAMEZAWA Hiroyuki5b4e6552008-10-18 20:28:10 -07001657 goto out;
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001658 retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
Linus Torvalds238f58d2005-11-29 13:01:56 -08001659 pte_unmap_unlock(pte, ptl);
1660out:
1661 return retval;
1662}
1663
Arjun Roy8cd39842020-04-10 14:33:01 -07001664#ifdef pte_index
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001665static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
Arjun Roy8cd39842020-04-10 14:33:01 -07001666 unsigned long addr, struct page *page, pgprot_t prot)
1667{
1668 int err;
1669
1670 if (!page_count(page))
1671 return -EINVAL;
1672 err = validate_page_before_insert(page);
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001673 if (err)
1674 return err;
1675 return insert_page_into_pte_locked(mm, pte, addr, page, prot);
Arjun Roy8cd39842020-04-10 14:33:01 -07001676}
1677
1678/* insert_pages() amortizes the cost of spinlock operations
1679 * when inserting pages in a loop. Arch *must* define pte_index.
1680 */
1681static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1682 struct page **pages, unsigned long *num, pgprot_t prot)
1683{
1684 pmd_t *pmd = NULL;
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001685 pte_t *start_pte, *pte;
1686 spinlock_t *pte_lock;
Arjun Roy8cd39842020-04-10 14:33:01 -07001687 struct mm_struct *const mm = vma->vm_mm;
1688 unsigned long curr_page_idx = 0;
1689 unsigned long remaining_pages_total = *num;
1690 unsigned long pages_to_write_in_pmd;
1691 int ret;
1692more:
1693 ret = -EFAULT;
1694 pmd = walk_to_pmd(mm, addr);
1695 if (!pmd)
1696 goto out;
1697
1698 pages_to_write_in_pmd = min_t(unsigned long,
1699 remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1700
1701 /* Allocate the PTE if necessary; takes PMD lock once only. */
1702 ret = -ENOMEM;
1703 if (pte_alloc(mm, pmd))
1704 goto out;
Arjun Roy8cd39842020-04-10 14:33:01 -07001705
1706 while (pages_to_write_in_pmd) {
1707 int pte_idx = 0;
1708 const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1709
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001710 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1711 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1712 int err = insert_page_in_batch_locked(mm, pte,
Arjun Roy8cd39842020-04-10 14:33:01 -07001713 addr, pages[curr_page_idx], prot);
1714 if (unlikely(err)) {
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001715 pte_unmap_unlock(start_pte, pte_lock);
Arjun Roy8cd39842020-04-10 14:33:01 -07001716 ret = err;
1717 remaining_pages_total -= pte_idx;
1718 goto out;
1719 }
1720 addr += PAGE_SIZE;
1721 ++curr_page_idx;
1722 }
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001723 pte_unmap_unlock(start_pte, pte_lock);
Arjun Roy8cd39842020-04-10 14:33:01 -07001724 pages_to_write_in_pmd -= batch_size;
1725 remaining_pages_total -= batch_size;
1726 }
1727 if (remaining_pages_total)
1728 goto more;
1729 ret = 0;
1730out:
1731 *num = remaining_pages_total;
1732 return ret;
1733}
1734#endif /* ifdef pte_index */
1735
1736/**
1737 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1738 * @vma: user vma to map to
1739 * @addr: target start user address of these pages
1740 * @pages: source kernel pages
1741 * @num: in: number of pages to map. out: number of pages that were *not*
1742 * mapped. (0 means all pages were successfully mapped).
1743 *
1744 * Preferred over vm_insert_page() when inserting multiple pages.
1745 *
1746 * In case of error, we may have mapped a subset of the provided
1747 * pages. It is the caller's responsibility to account for this case.
1748 *
1749 * The same restrictions apply as in vm_insert_page().
1750 */
1751int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1752 struct page **pages, unsigned long *num)
1753{
1754#ifdef pte_index
1755 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1756
1757 if (addr < vma->vm_start || end_addr >= vma->vm_end)
1758 return -EFAULT;
1759 if (!(vma->vm_flags & VM_MIXEDMAP)) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001760 BUG_ON(mmap_read_trylock(vma->vm_mm));
Arjun Roy8cd39842020-04-10 14:33:01 -07001761 BUG_ON(vma->vm_flags & VM_PFNMAP);
1762 vma->vm_flags |= VM_MIXEDMAP;
1763 }
1764 /* Defer page refcount checking till we're about to map that page. */
1765 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1766#else
1767 unsigned long idx = 0, pgcount = *num;
Tom Rix45779b02020-07-23 21:15:18 -07001768 int err = -EINVAL;
Arjun Roy8cd39842020-04-10 14:33:01 -07001769
1770 for (; idx < pgcount; ++idx) {
1771 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1772 if (err)
1773 break;
1774 }
1775 *num = pgcount - idx;
1776 return err;
1777#endif /* ifdef pte_index */
1778}
1779EXPORT_SYMBOL(vm_insert_pages);
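
/*
 * Illustrative sketch, not part of this file: inserting a whole array of
 * kernel pages with one call, in the style of TCP zerocopy receive.  The
 * in/out contract of @num documented above tells the caller how many pages
 * were left unmapped on error.  "my_" names are hypothetical.
 */
static int my_map_page_array(struct vm_area_struct *vma, unsigned long uaddr,
			     struct page **pages, unsigned long nr_pages)
{
	unsigned long left = nr_pages;
	int err;

	err = vm_insert_pages(vma, uaddr, pages, &left);
	if (err)
		pr_debug("mapped %lu of %lu pages, err %d\n",
			 nr_pages - left, nr_pages, err);
	return err;
}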
1780
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07001781/**
1782 * vm_insert_page - insert single page into user vma
1783 * @vma: user vma to map to
1784 * @addr: target user address of this page
1785 * @page: source kernel page
1786 *
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001787 * This allows drivers to insert individual pages they've allocated
1788 * into a user vma.
1789 *
1790 * The page has to be a nice clean _individual_ kernel allocation.
1791 * If you allocate a compound page, you need to have marked it as
1792 * such (__GFP_COMP), or manually just split the page up yourself
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08001793 * (see split_page()).
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001794 *
1795 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1796 * took an arbitrary page protection parameter. This doesn't allow
1797 * that. Your vma protection will have to be set up correctly, which
1798 * means that if you want a shared writable mapping, you'd better
1799 * ask for a shared writable mapping!
1800 *
1801 * The page does not need to be reserved.
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001802 *
1803 * Usually this function is called from f_op->mmap() handler
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001804 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001805 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1806 * function from other places, for example from page-fault handler.
Mike Rapoporta862f682019-03-05 15:48:42 -08001807 *
1808 * Return: %0 on success, negative error code otherwise.
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001809 */
Nick Piggin423bad602008-04-28 02:13:01 -07001810int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1811 struct page *page)
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001812{
1813 if (addr < vma->vm_start || addr >= vma->vm_end)
1814 return -EFAULT;
1815 if (!page_count(page))
1816 return -EINVAL;
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001817 if (!(vma->vm_flags & VM_MIXEDMAP)) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001818 BUG_ON(mmap_read_trylock(vma->vm_mm));
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001819 BUG_ON(vma->vm_flags & VM_PFNMAP);
1820 vma->vm_flags |= VM_MIXEDMAP;
1821 }
Nick Piggin423bad602008-04-28 02:13:01 -07001822 return insert_page(vma, addr, page, vma->vm_page_prot);
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001823}
Linus Torvaldse3c33742005-12-03 20:48:11 -08001824EXPORT_SYMBOL(vm_insert_page);
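
/*
 * Illustrative sketch, not part of this file: a minimal driver ->mmap
 * handler backing the vma with a single driver-owned page.  "my_priv" is
 * hypothetical; the page is assumed to be an individual (non-compound)
 * allocation from alloc_page(GFP_KERNEL), as required above.
 */
struct my_priv {
	struct page *page;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_priv *priv = file->private_data;

	if (vma->vm_end - vma->vm_start > PAGE_SIZE)
		return -EINVAL;
	/* Called under mmap_lock write-lock, so vm_flags may be updated. */
	return vm_insert_page(vma, vma->vm_start, priv->page);
}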
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001825
Souptick Joardera667d742019-05-13 17:21:56 -07001826/*
1827 * __vm_map_pages - maps range of kernel pages into user vma
1828 * @vma: user vma to map to
1829 * @pages: pointer to array of source kernel pages
1830 * @num: number of pages in page array
1831 * @offset: user's requested vm_pgoff
1832 *
1833 * This allows drivers to map range of kernel pages into a user vma.
1834 *
1835 * Return: 0 on success and error code otherwise.
1836 */
1837static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1838 unsigned long num, unsigned long offset)
1839{
1840 unsigned long count = vma_pages(vma);
1841 unsigned long uaddr = vma->vm_start;
1842 int ret, i;
1843
1844 /* Fail if the user requested offset is beyond the end of the object */
Miguel Ojeda96756fc2019-07-11 20:58:47 -07001845 if (offset >= num)
Souptick Joardera667d742019-05-13 17:21:56 -07001846 return -ENXIO;
1847
1848 /* Fail if the user requested size exceeds available object size */
1849 if (count > num - offset)
1850 return -ENXIO;
1851
1852 for (i = 0; i < count; i++) {
1853 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1854 if (ret < 0)
1855 return ret;
1856 uaddr += PAGE_SIZE;
1857 }
1858
1859 return 0;
1860}
1861
1862/**
 1863 * vm_map_pages - maps a range of kernel pages starting at a non-zero offset
1864 * @vma: user vma to map to
1865 * @pages: pointer to array of source kernel pages
1866 * @num: number of pages in page array
1867 *
1868 * Maps an object consisting of @num pages, catering for the user's
1869 * requested vm_pgoff
1870 *
1871 * If we fail to insert any page into the vma, the function will return
1872 * immediately leaving any previously inserted pages present. Callers
1873 * from the mmap handler may immediately return the error as their caller
1874 * will destroy the vma, removing any successfully inserted pages. Other
1875 * callers should make their own arrangements for calling unmap_region().
1876 *
1877 * Context: Process context. Called by mmap handlers.
1878 * Return: 0 on success and error code otherwise.
1879 */
1880int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1881 unsigned long num)
1882{
1883 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
1884}
1885EXPORT_SYMBOL(vm_map_pages);
1886
1887/**
 1888 * vm_map_pages_zero - maps a range of kernel pages starting at offset zero
1889 * @vma: user vma to map to
1890 * @pages: pointer to array of source kernel pages
1891 * @num: number of pages in page array
1892 *
1893 * Similar to vm_map_pages(), except that it explicitly sets the offset
1894 * to 0. This function is intended for the drivers that did not consider
1895 * vm_pgoff.
1896 *
1897 * Context: Process context. Called by mmap handlers.
1898 * Return: 0 on success and error code otherwise.
1899 */
1900int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
1901 unsigned long num)
1902{
1903 return __vm_map_pages(vma, pages, num, 0);
1904}
1905EXPORT_SYMBOL(vm_map_pages_zero);
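
/*
 * Illustrative sketch, not part of this file: a ->mmap handler exposing a
 * pre-allocated page array and honouring the user's requested vm_pgoff via
 * vm_map_pages().  "my_obj" is hypothetical.
 */
struct my_obj {
	struct page **pages;
	unsigned long nr_pages;
};

static int my_obj_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_obj *obj = file->private_data;

	/* Range and offset checks are done inside __vm_map_pages(). */
	return vm_map_pages(vma, obj->pages, obj->nr_pages);
}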
1906
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001907static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
Ross Zwislerb2770da62017-09-06 16:18:35 -07001908 pfn_t pfn, pgprot_t prot, bool mkwrite)
Nick Piggin423bad602008-04-28 02:13:01 -07001909{
1910 struct mm_struct *mm = vma->vm_mm;
Nick Piggin423bad602008-04-28 02:13:01 -07001911 pte_t *pte, entry;
1912 spinlock_t *ptl;
1913
Nick Piggin423bad602008-04-28 02:13:01 -07001914 pte = get_locked_pte(mm, addr, &ptl);
1915 if (!pte)
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001916 return VM_FAULT_OOM;
Ross Zwislerb2770da62017-09-06 16:18:35 -07001917 if (!pte_none(*pte)) {
1918 if (mkwrite) {
1919 /*
1920 * For read faults on private mappings the PFN passed
1921 * in may not match the PFN we have mapped if the
1922 * mapped PFN is a writeable COW page. In the mkwrite
1923 * case we are creating a writable PTE for a shared
Jan Karaf2c57d92018-10-30 15:10:47 -07001924 * mapping and we expect the PFNs to match. If they
1925 * don't match, we are likely racing with block
1926 * allocation and mapping invalidation so just skip the
1927 * update.
Ross Zwislerb2770da62017-09-06 16:18:35 -07001928 */
Jan Karaf2c57d92018-10-30 15:10:47 -07001929 if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
1930 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
Ross Zwislerb2770da62017-09-06 16:18:35 -07001931 goto out_unlock;
Jan Karaf2c57d92018-10-30 15:10:47 -07001932 }
Jan Karacae85cb2019-03-28 20:43:19 -07001933 entry = pte_mkyoung(*pte);
1934 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1935 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
1936 update_mmu_cache(vma, addr, pte);
1937 }
1938 goto out_unlock;
Ross Zwislerb2770da62017-09-06 16:18:35 -07001939 }
Nick Piggin423bad602008-04-28 02:13:01 -07001940
1941 /* Ok, finally just insert the thing.. */
Dan Williams01c8f1c2016-01-15 16:56:40 -08001942 if (pfn_t_devmap(pfn))
1943 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1944 else
1945 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
Ross Zwislerb2770da62017-09-06 16:18:35 -07001946
Ross Zwislerb2770da62017-09-06 16:18:35 -07001947 if (mkwrite) {
1948 entry = pte_mkyoung(entry);
1949 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1950 }
1951
Nick Piggin423bad602008-04-28 02:13:01 -07001952 set_pte_at(mm, addr, pte, entry);
Russell King4b3073e2009-12-18 16:40:18 +00001953 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
Nick Piggin423bad602008-04-28 02:13:01 -07001954
Nick Piggin423bad602008-04-28 02:13:01 -07001955out_unlock:
1956 pte_unmap_unlock(pte, ptl);
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001957 return VM_FAULT_NOPAGE;
Nick Piggin423bad602008-04-28 02:13:01 -07001958}
1959
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001960/**
1961 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1962 * @vma: user vma to map to
1963 * @addr: target user address of this page
1964 * @pfn: source kernel pfn
1965 * @pgprot: pgprot flags for the inserted page
1966 *
Randy Dunlapa1a0aea2020-08-11 18:33:05 -07001967 * This is exactly like vmf_insert_pfn(), except that it allows drivers
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001968 * to override pgprot on a per-page basis.
1969 *
1970 * This only makes sense for IO mappings, and it makes no sense for
1971 * COW mappings. In general, using multiple vmas is preferable;
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07001972 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001973 * impractical.
1974 *
Thomas Hellstrom574c5b3d2019-11-22 09:25:12 +01001975 * See vmf_insert_mixed_prot() for a discussion of the implication of using
1976 * a value of @pgprot different from that of @vma->vm_page_prot.
1977 *
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07001978 * Context: Process context. May allocate using %GFP_KERNEL.
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001979 * Return: vm_fault_t value.
1980 */
1981vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
1982 unsigned long pfn, pgprot_t pgprot)
1983{
Matthew Wilcox6d958542018-10-26 15:04:33 -07001984 /*
1985 * Technically, architectures with pte_special can avoid all these
1986 * restrictions (same for remap_pfn_range). However we would like
1987 * consistency in testing and feature parity among all, so we should
1988 * try to keep these invariants in place for everybody.
1989 */
1990 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1991 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1992 (VM_PFNMAP|VM_MIXEDMAP));
1993 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1994 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1995
1996 if (addr < vma->vm_start || addr >= vma->vm_end)
1997 return VM_FAULT_SIGBUS;
1998
1999 if (!pfn_modify_allowed(pfn, pgprot))
2000 return VM_FAULT_SIGBUS;
2001
2002 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2003
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07002004 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
Matthew Wilcox6d958542018-10-26 15:04:33 -07002005 false);
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07002006}
2007EXPORT_SYMBOL(vmf_insert_pfn_prot);
Nick Piggine0dc0d82007-02-12 00:51:36 -08002008
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07002009/**
2010 * vmf_insert_pfn - insert single pfn into user vma
2011 * @vma: user vma to map to
2012 * @addr: target user address of this page
2013 * @pfn: source kernel pfn
2014 *
2015 * Similar to vm_insert_page, this allows drivers to insert individual pages
2016 * they've allocated into a user vma. Same comments apply.
2017 *
2018 * This function should only be called from a vm_ops->fault handler, and
2019 * in that case the handler should return the result of this function.
2020 *
2021 * vma cannot be a COW mapping.
2022 *
2023 * As this is called only for pages that do not currently exist, we
2024 * do not need to flush old virtual caches or the TLB.
2025 *
2026 * Context: Process context. May allocate using %GFP_KERNEL.
2027 * Return: vm_fault_t value.
2028 */
2029vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2030 unsigned long pfn)
2031{
2032 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2033}
2034EXPORT_SYMBOL(vmf_insert_pfn);
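
/*
 * Illustrative sketch, not part of this file: a ->fault handler for a
 * VM_PFNMAP vma backed by a physically contiguous region, returning the
 * result of vmf_insert_pfn() as recommended above.  "my_region" is
 * hypothetical; ->phys is assumed to be page aligned.
 */
struct my_region {
	phys_addr_t phys;
	unsigned long nr_pages;
};

static vm_fault_t my_region_fault(struct vm_fault *vmf)
{
	struct my_region *r = vmf->vma->vm_private_data;

	if (vmf->pgoff >= r->nr_pages)
		return VM_FAULT_SIGBUS;
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      (r->phys >> PAGE_SHIFT) + vmf->pgoff);
}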
2035
Dan Williams785a3fa2017-10-23 07:20:00 -07002036static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2037{
2038 /* these checks mirror the abort conditions in vm_normal_page */
2039 if (vma->vm_flags & VM_MIXEDMAP)
2040 return true;
2041 if (pfn_t_devmap(pfn))
2042 return true;
2043 if (pfn_t_special(pfn))
2044 return true;
2045 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2046 return true;
2047 return false;
2048}
2049
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002050static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
Thomas Hellstrom574c5b3d2019-11-22 09:25:12 +01002051 unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2052 bool mkwrite)
Nick Piggin423bad602008-04-28 02:13:01 -07002053{
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002054 int err;
Dan Williams87744ab2016-10-07 17:00:18 -07002055
Dan Williams785a3fa2017-10-23 07:20:00 -07002056 BUG_ON(!vm_mixed_ok(vma, pfn));
Nick Piggin423bad602008-04-28 02:13:01 -07002057
2058 if (addr < vma->vm_start || addr >= vma->vm_end)
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002059 return VM_FAULT_SIGBUS;
Borislav Petkov308a0472016-10-26 19:43:43 +02002060
2061 track_pfn_insert(vma, &pgprot, pfn);
Nick Piggin423bad602008-04-28 02:13:01 -07002062
Andi Kleen42e40892018-06-13 15:48:27 -07002063 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002064 return VM_FAULT_SIGBUS;
Andi Kleen42e40892018-06-13 15:48:27 -07002065
Nick Piggin423bad602008-04-28 02:13:01 -07002066 /*
2067 * If we don't have pte special, then we have to use the pfn_valid()
2068 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2069 * refcount the page if pfn_valid is true (hence insert_page rather
Hugh Dickins62eede62009-09-21 17:03:34 -07002070 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 2071	 * without pte special, it would then be refcounted as a normal page.
Nick Piggin423bad602008-04-28 02:13:01 -07002072 */
Laurent Dufour00b3a332018-06-07 17:06:12 -07002073 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2074 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
Nick Piggin423bad602008-04-28 02:13:01 -07002075 struct page *page;
2076
Dan Williams03fc2da2016-01-26 09:48:05 -08002077 /*
2078 * At this point we are committed to insert_page()
2079 * regardless of whether the caller specified flags that
2080 * result in pfn_t_has_page() == false.
2081 */
2082 page = pfn_to_page(pfn_t_to_pfn(pfn));
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002083 err = insert_page(vma, addr, page, pgprot);
2084 } else {
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07002085 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
Nick Piggin423bad602008-04-28 02:13:01 -07002086 }
Ross Zwislerb2770da62017-09-06 16:18:35 -07002087
Matthew Wilcox5d747632018-10-26 15:04:10 -07002088 if (err == -ENOMEM)
2089 return VM_FAULT_OOM;
2090 if (err < 0 && err != -EBUSY)
2091 return VM_FAULT_SIGBUS;
2092
2093 return VM_FAULT_NOPAGE;
Nick Piggin423bad602008-04-28 02:13:01 -07002094}
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002095
Thomas Hellstrom574c5b3d2019-11-22 09:25:12 +01002096/**
2097 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2098 * @vma: user vma to map to
2099 * @addr: target user address of this page
2100 * @pfn: source kernel pfn
2101 * @pgprot: pgprot flags for the inserted page
2102 *
Randy Dunlapa1a0aea2020-08-11 18:33:05 -07002103 * This is exactly like vmf_insert_mixed(), except that it allows drivers
Thomas Hellstrom574c5b3d2019-11-22 09:25:12 +01002104 * to override pgprot on a per-page basis.
2105 *
2106 * Typically this function should be used by drivers to set caching- and
2107 * encryption bits different than those of @vma->vm_page_prot, because
2108 * the caching- or encryption mode may not be known at mmap() time.
2109 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2110 * to set caching and encryption bits for those vmas (except for COW pages).
2111 * This is ensured by core vm only modifying these page table entries using
2112 * functions that don't touch caching- or encryption bits, using pte_modify()
2113 * if needed. (See for example mprotect()).
2114 * Also when new page-table entries are created, this is only done using the
2115 * fault() callback, and never using the value of vma->vm_page_prot,
2116 * except for page-table entries that point to anonymous pages as the result
2117 * of COW.
2118 *
2119 * Context: Process context. May allocate using %GFP_KERNEL.
2120 * Return: vm_fault_t value.
2121 */
2122vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2123 pfn_t pfn, pgprot_t pgprot)
2124{
2125 return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2126}
Thomas Hellstrom5379e4d2019-11-22 09:34:35 +01002127EXPORT_SYMBOL(vmf_insert_mixed_prot);
Thomas Hellstrom574c5b3d2019-11-22 09:25:12 +01002128
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002129vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2130 pfn_t pfn)
2131{
Thomas Hellstrom574c5b3d2019-11-22 09:25:12 +01002132 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002133}
Matthew Wilcox5d747632018-10-26 15:04:10 -07002134EXPORT_SYMBOL(vmf_insert_mixed);
Nick Piggin423bad602008-04-28 02:13:01 -07002135
Souptick Joarderab77dab2018-06-07 17:04:29 -07002136/*
2137 * If the insertion of PTE failed because someone else already added a
2138 * different entry in the mean time, we treat that as success as we assume
2139 * the same entry was actually inserted.
2140 */
Souptick Joarderab77dab2018-06-07 17:04:29 -07002141vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2142 unsigned long addr, pfn_t pfn)
Ross Zwislerb2770da62017-09-06 16:18:35 -07002143{
Thomas Hellstrom574c5b3d2019-11-22 09:25:12 +01002144 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
Ross Zwislerb2770da62017-09-06 16:18:35 -07002145}
Souptick Joarderab77dab2018-06-07 17:04:29 -07002146EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
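
/*
 * Illustrative sketch, not part of this file: a ->fault handler that maps
 * device memory write-combined, overriding vm_page_prot per page as the
 * vmf_insert_mixed_prot() comment above describes.  "my_wc_buf" is
 * hypothetical.
 */
struct my_wc_buf {
	unsigned long base_pfn;		/* first pfn of the device aperture */
	unsigned long nr_pages;
};

static vm_fault_t my_wc_fault(struct vm_fault *vmf)
{
	struct my_wc_buf *buf = vmf->vma->vm_private_data;
	pgprot_t prot = pgprot_writecombine(vmf->vma->vm_page_prot);
	pfn_t pfn;

	if (vmf->pgoff >= buf->nr_pages)
		return VM_FAULT_SIGBUS;
	pfn = __pfn_to_pfn_t(buf->base_pfn + vmf->pgoff, PFN_DEV);
	return vmf_insert_mixed_prot(vmf->vma, vmf->address, pfn, prot);
}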
Ross Zwislerb2770da62017-09-06 16:18:35 -07002147
Linus Torvaldsa145dd42005-11-30 09:35:19 -08002148/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 * maps a range of physical memory into the requested pages. the old
 2150 * mappings are removed. any references to nonexistent pages result
2151 * in null mappings (currently treated as "copy-on-access")
2152 */
2153static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2154 unsigned long addr, unsigned long end,
2155 unsigned long pfn, pgprot_t prot)
2156{
2157 pte_t *pte;
Hugh Dickinsc74df322005-10-29 18:16:23 -07002158 spinlock_t *ptl;
Andi Kleen42e40892018-06-13 15:48:27 -07002159 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
Hugh Dickinsc74df322005-10-29 18:16:23 -07002161 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 if (!pte)
2163 return -ENOMEM;
Zachary Amsden6606c3e2006-09-30 23:29:33 -07002164 arch_enter_lazy_mmu_mode();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 do {
2166 BUG_ON(!pte_none(*pte));
Andi Kleen42e40892018-06-13 15:48:27 -07002167 if (!pfn_modify_allowed(pfn, prot)) {
2168 err = -EACCES;
2169 break;
2170 }
Nick Piggin7e675132008-04-28 02:13:00 -07002171 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 pfn++;
2173 } while (pte++, addr += PAGE_SIZE, addr != end);
Zachary Amsden6606c3e2006-09-30 23:29:33 -07002174 arch_leave_lazy_mmu_mode();
Hugh Dickinsc74df322005-10-29 18:16:23 -07002175 pte_unmap_unlock(pte - 1, ptl);
Andi Kleen42e40892018-06-13 15:48:27 -07002176 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177}
2178
2179static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2180 unsigned long addr, unsigned long end,
2181 unsigned long pfn, pgprot_t prot)
2182{
2183 pmd_t *pmd;
2184 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002185 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186
2187 pfn -= addr >> PAGE_SHIFT;
2188 pmd = pmd_alloc(mm, pud, addr);
2189 if (!pmd)
2190 return -ENOMEM;
Andrea Arcangelif66055ab2011-01-13 15:46:54 -08002191 VM_BUG_ON(pmd_trans_huge(*pmd));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 do {
2193 next = pmd_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002194 err = remap_pte_range(mm, pmd, addr, next,
2195 pfn + (addr >> PAGE_SHIFT), prot);
2196 if (err)
2197 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 } while (pmd++, addr = next, addr != end);
2199 return 0;
2200}
2201
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002202static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 unsigned long addr, unsigned long end,
2204 unsigned long pfn, pgprot_t prot)
2205{
2206 pud_t *pud;
2207 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002208 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
2210 pfn -= addr >> PAGE_SHIFT;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002211 pud = pud_alloc(mm, p4d, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 if (!pud)
2213 return -ENOMEM;
2214 do {
2215 next = pud_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002216 err = remap_pmd_range(mm, pud, addr, next,
2217 pfn + (addr >> PAGE_SHIFT), prot);
2218 if (err)
2219 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 } while (pud++, addr = next, addr != end);
2221 return 0;
2222}
2223
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002224static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2225 unsigned long addr, unsigned long end,
2226 unsigned long pfn, pgprot_t prot)
2227{
2228 p4d_t *p4d;
2229 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002230 int err;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002231
2232 pfn -= addr >> PAGE_SHIFT;
2233 p4d = p4d_alloc(mm, pgd, addr);
2234 if (!p4d)
2235 return -ENOMEM;
2236 do {
2237 next = p4d_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002238 err = remap_pud_range(mm, p4d, addr, next,
2239 pfn + (addr >> PAGE_SHIFT), prot);
2240 if (err)
2241 return err;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002242 } while (p4d++, addr = next, addr != end);
2243 return 0;
2244}
2245
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002246/**
2247 * remap_pfn_range - remap kernel memory to userspace
2248 * @vma: user vma to map to
Alex Zhang0c4123e2020-08-06 23:22:24 -07002249 * @addr: target page aligned user address to start at
WANG Wenhu86a76332020-04-01 21:09:03 -07002250 * @pfn: page frame number of kernel physical memory address
chenqiwu552657b2020-04-06 20:08:33 -07002251 * @size: size of mapping area
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002252 * @prot: page protection flags for this mapping
2253 *
Mike Rapoporta862f682019-03-05 15:48:42 -08002254 * Note: this is only safe if mmap_lock is held when called.
2255 *
2256 * Return: %0 on success, negative error code otherwise.
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002257 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2259 unsigned long pfn, unsigned long size, pgprot_t prot)
2260{
2261 pgd_t *pgd;
2262 unsigned long next;
Hugh Dickins2d15cab2005-06-25 14:54:33 -07002263 unsigned long end = addr + PAGE_ALIGN(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 struct mm_struct *mm = vma->vm_mm;
Yongji Xied5957d22016-05-20 16:57:41 -07002265 unsigned long remap_pfn = pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 int err;
2267
Alex Zhang0c4123e2020-08-06 23:22:24 -07002268 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2269 return -EINVAL;
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 /*
2272 * Physically remapped pages are special. Tell the
2273 * rest of the world about it:
2274 * VM_IO tells people not to look at these pages
2275 * (accesses can have side effects).
Linus Torvalds6aab3412005-11-28 14:34:23 -08002276 * VM_PFNMAP tells the core MM that the base pages are just
2277 * raw PFN mappings, and do not have a "struct page" associated
2278 * with them.
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002279 * VM_DONTEXPAND
2280 * Disable vma merging and expanding with mremap().
2281 * VM_DONTDUMP
2282 * Omit vma from core dump, even when VM_IO turned off.
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002283 *
2284 * There's a horrible special case to handle copy-on-write
2285 * behaviour that some programs depend on. We mark the "original"
2286 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002287 * See vm_normal_page() for details.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 */
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002289 if (is_cow_mapping(vma->vm_flags)) {
2290 if (addr != vma->vm_start || end != vma->vm_end)
2291 return -EINVAL;
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002292 vma->vm_pgoff = pfn;
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002293 }
2294
Yongji Xied5957d22016-05-20 16:57:41 -07002295 err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002296 if (err)
venkatesh.pallipadi@intel.com3c8bb732008-12-18 11:41:27 -08002297 return -EINVAL;
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002298
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002299 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300
2301 BUG_ON(addr >= end);
2302 pfn -= addr >> PAGE_SHIFT;
2303 pgd = pgd_offset(mm, addr);
2304 flush_cache_range(vma, addr, end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 do {
2306 next = pgd_addr_end(addr, end);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002307 err = remap_p4d_range(mm, pgd, addr, next,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 pfn + (addr >> PAGE_SHIFT), prot);
2309 if (err)
2310 break;
2311 } while (pgd++, addr = next, addr != end);
venkatesh.pallipadi@intel.com2ab64032008-12-18 11:41:29 -08002312
2313 if (err)
Yongji Xied5957d22016-05-20 16:57:41 -07002314 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
venkatesh.pallipadi@intel.com2ab64032008-12-18 11:41:29 -08002315
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 return err;
2317}
2318EXPORT_SYMBOL(remap_pfn_range);
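
/*
 * Illustrative sketch, not part of this file: the classic driver use of
 * remap_pfn_range() from an ->mmap handler, mapping a physically contiguous
 * buffer in one go at mmap() time.  "my_pfn_buf" is hypothetical.
 */
struct my_pfn_buf {
	unsigned long pfn;	/* first page frame of the buffer */
	unsigned long nr_pages;
};

static int my_pfn_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_pfn_buf *buf = file->private_data;
	unsigned long vpages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	if (vma->vm_pgoff + vpages > buf->nr_pages)
		return -EINVAL;
	return remap_pfn_range(vma, vma->vm_start, buf->pfn + vma->vm_pgoff,
			       vpages << PAGE_SHIFT, vma->vm_page_prot);
}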
2319
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002320/**
2321 * vm_iomap_memory - remap memory to userspace
2322 * @vma: user vma to map to
Wang Wenhuabd69b92020-04-01 21:09:07 -07002323 * @start: start of the physical memory to be mapped
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002324 * @len: size of area
2325 *
2326 * This is a simplified io_remap_pfn_range() for common driver use. The
2327 * driver just needs to give us the physical memory range to be mapped,
2328 * we'll figure out the rest from the vma information.
2329 *
 2330 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set up
 2331 * write-combining or similar caching attributes.
Mike Rapoporta862f682019-03-05 15:48:42 -08002332 *
2333 * Return: %0 on success, negative error code otherwise.
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002334 */
2335int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2336{
2337 unsigned long vm_len, pfn, pages;
2338
2339 /* Check that the physical memory area passed in looks valid */
2340 if (start + len < start)
2341 return -EINVAL;
2342 /*
2343 * You *really* shouldn't map things that aren't page-aligned,
2344 * but we've historically allowed it because IO memory might
2345 * just have smaller alignment.
2346 */
2347 len += start & ~PAGE_MASK;
2348 pfn = start >> PAGE_SHIFT;
2349 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2350 if (pfn + pages < pfn)
2351 return -EINVAL;
2352
2353 /* We start the mapping 'vm_pgoff' pages into the area */
2354 if (vma->vm_pgoff > pages)
2355 return -EINVAL;
2356 pfn += vma->vm_pgoff;
2357 pages -= vma->vm_pgoff;
2358
2359 /* Can we fit all of the mapping? */
2360 vm_len = vma->vm_end - vma->vm_start;
2361 if (vm_len >> PAGE_SHIFT > pages)
2362 return -EINVAL;
2363
2364 /* Ok, let it rip */
2365 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2366}
2367EXPORT_SYMBOL(vm_iomap_memory);
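
/*
 * Illustrative sketch, not part of this file: vm_iomap_memory() saves a
 * driver from doing the pfn and vm_pgoff arithmetic itself; it only needs
 * the physical range.  "my_bar" is hypothetical (think of a PCI BAR).
 */
struct my_bar {
	phys_addr_t start;
	unsigned long len;
};

static int my_bar_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_bar *bar = file->private_data;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, bar->start, bar->len);
}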
2368
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002369static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2370 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002371 pte_fn_t fn, void *data, bool create,
2372 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002373{
2374 pte_t *pte;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002375 int err = 0;
Kees Cook3f649ab2020-06-03 13:09:38 -07002376 spinlock_t *ptl;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002377
Daniel Axtensbe1db472019-12-17 20:51:41 -08002378 if (create) {
2379 pte = (mm == &init_mm) ?
Joerg Roedele80d3902020-09-04 16:35:43 -07002380 pte_alloc_kernel_track(pmd, addr, mask) :
Daniel Axtensbe1db472019-12-17 20:51:41 -08002381 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2382 if (!pte)
2383 return -ENOMEM;
2384 } else {
2385 pte = (mm == &init_mm) ?
2386 pte_offset_kernel(pmd, addr) :
2387 pte_offset_map_lock(mm, pmd, addr, &ptl);
2388 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002389
2390 BUG_ON(pmd_huge(*pmd));
2391
Jeremy Fitzhardinge38e0edb2009-01-06 14:39:21 -08002392 arch_enter_lazy_mmu_mode();
2393
Christoph Hellwigeeb4a052020-10-17 16:15:14 -07002394 if (fn) {
2395 do {
2396 if (create || !pte_none(*pte)) {
2397 err = fn(pte++, addr, data);
2398 if (err)
2399 break;
2400 }
2401 } while (addr += PAGE_SIZE, addr != end);
2402 }
Joerg Roedele80d3902020-09-04 16:35:43 -07002403 *mask |= PGTBL_PTE_MODIFIED;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002404
Jeremy Fitzhardinge38e0edb2009-01-06 14:39:21 -08002405 arch_leave_lazy_mmu_mode();
2406
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002407 if (mm != &init_mm)
2408 pte_unmap_unlock(pte-1, ptl);
2409 return err;
2410}
2411
2412static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2413 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002414 pte_fn_t fn, void *data, bool create,
2415 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002416{
2417 pmd_t *pmd;
2418 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002419 int err = 0;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002420
Andi Kleenceb86872008-07-23 21:27:50 -07002421 BUG_ON(pud_huge(*pud));
2422
Daniel Axtensbe1db472019-12-17 20:51:41 -08002423 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002424 pmd = pmd_alloc_track(mm, pud, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002425 if (!pmd)
2426 return -ENOMEM;
2427 } else {
2428 pmd = pmd_offset(pud, addr);
2429 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002430 do {
2431 next = pmd_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002432 if (create || !pmd_none_or_clear_bad(pmd)) {
2433 err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002434 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002435 if (err)
2436 break;
2437 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002438 } while (pmd++, addr = next, addr != end);
2439 return err;
2440}
2441
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002442static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002443 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002444 pte_fn_t fn, void *data, bool create,
2445 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002446{
2447 pud_t *pud;
2448 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002449 int err = 0;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002450
Daniel Axtensbe1db472019-12-17 20:51:41 -08002451 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002452 pud = pud_alloc_track(mm, p4d, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002453 if (!pud)
2454 return -ENOMEM;
2455 } else {
2456 pud = pud_offset(p4d, addr);
2457 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002458 do {
2459 next = pud_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002460 if (create || !pud_none_or_clear_bad(pud)) {
2461 err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002462 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002463 if (err)
2464 break;
2465 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002466 } while (pud++, addr = next, addr != end);
2467 return err;
2468}
2469
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002470static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2471 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002472 pte_fn_t fn, void *data, bool create,
2473 pgtbl_mod_mask *mask)
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002474{
2475 p4d_t *p4d;
2476 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002477 int err = 0;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002478
Daniel Axtensbe1db472019-12-17 20:51:41 -08002479 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002480 p4d = p4d_alloc_track(mm, pgd, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002481 if (!p4d)
2482 return -ENOMEM;
2483 } else {
2484 p4d = p4d_offset(pgd, addr);
2485 }
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002486 do {
2487 next = p4d_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002488 if (create || !p4d_none_or_clear_bad(p4d)) {
2489 err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002490 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002491 if (err)
2492 break;
2493 }
2494 } while (p4d++, addr = next, addr != end);
2495 return err;
2496}
2497
2498static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2499 unsigned long size, pte_fn_t fn,
2500 void *data, bool create)
2501{
2502 pgd_t *pgd;
Joerg Roedele80d3902020-09-04 16:35:43 -07002503 unsigned long start = addr, next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002504 unsigned long end = addr + size;
Joerg Roedele80d3902020-09-04 16:35:43 -07002505 pgtbl_mod_mask mask = 0;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002506 int err = 0;
2507
2508 if (WARN_ON(addr >= end))
2509 return -EINVAL;
2510
2511 pgd = pgd_offset(mm, addr);
2512 do {
2513 next = pgd_addr_end(addr, end);
2514 if (!create && pgd_none_or_clear_bad(pgd))
2515 continue;
Joerg Roedele80d3902020-09-04 16:35:43 -07002516 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002517 if (err)
2518 break;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002519 } while (pgd++, addr = next, addr != end);
2520
Joerg Roedele80d3902020-09-04 16:35:43 -07002521 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2522 arch_sync_kernel_mappings(start, start + size);
2523
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002524 return err;
2525}
2526
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002527/*
2528 * Scan a region of virtual memory, filling in page tables as necessary
 2529 * and calling a provided function on each leaf page table entry.
2530 */
2531int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2532 unsigned long size, pte_fn_t fn, void *data)
2533{
Daniel Axtensbe1db472019-12-17 20:51:41 -08002534 return __apply_to_page_range(mm, addr, size, fn, data, true);
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002535}
2536EXPORT_SYMBOL_GPL(apply_to_page_range);
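/*
 * Illustrative sketch (not part of this file): the pte_fn_t callback is
 * invoked once per PTE in the range.  Here a hypothetical helper counts
 * the populated kernel PTEs in a caller-chosen range (start and nr_pages
 * stand in for that range); apply_to_existing_page_range() below does the
 * same walk but never allocates missing page-table levels.
 *
 *	static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (!pte_none(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	int err = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
 *				      count_present_pte, &count);
 */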
2537
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538/*
Daniel Axtensbe1db472019-12-17 20:51:41 -08002539 * Scan a region of virtual memory, calling a provided function on
 2540 * each existing leaf page table entry.
2541 *
2542 * Unlike apply_to_page_range, this does _not_ fill in page tables
2543 * where they are absent.
2544 */
2545int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2546 unsigned long size, pte_fn_t fn, void *data)
2547{
2548 return __apply_to_page_range(mm, addr, size, fn, data, false);
2549}
2550EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
2551
2552/*
Kirill A. Shutemov9b4bdd22015-02-10 14:09:51 -08002553 * handle_pte_fault chooses page fault handler according to an entry which was
2554 * read non-atomically. Before making any commitment, on those architectures
2555 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2556 * parts, do_swap_page must check under lock before unmapping the pte and
2557 * proceeding (but do_wp_page is only called after already making such a check;
Ryota Ozakia335b2e2011-02-10 13:56:28 +09002558 * and do_anonymous_page can safely check later on).
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002559 */
Hugh Dickins4c21e2f2005-10-29 18:16:40 -07002560static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002561 pte_t *page_table, pte_t orig_pte)
2562{
2563 int same = 1;
Thomas Gleixner923717c2019-10-15 21:18:12 +02002564#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002565 if (sizeof(pte_t) > sizeof(unsigned long)) {
Hugh Dickins4c21e2f2005-10-29 18:16:40 -07002566 spinlock_t *ptl = pte_lockptr(mm, pmd);
2567 spin_lock(ptl);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002568 same = pte_same(*page_table, orig_pte);
Hugh Dickins4c21e2f2005-10-29 18:16:40 -07002569 spin_unlock(ptl);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002570 }
2571#endif
2572 pte_unmap(page_table);
2573 return same;
2574}
2575
Jia He83d116c2019-10-11 22:09:39 +08002576static inline bool cow_user_page(struct page *dst, struct page *src,
2577 struct vm_fault *vmf)
Linus Torvalds6aab3412005-11-28 14:34:23 -08002578{
Jia He83d116c2019-10-11 22:09:39 +08002579 bool ret;
2580 void *kaddr;
2581 void __user *uaddr;
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002582 bool locked = false;
Jia He83d116c2019-10-11 22:09:39 +08002583 struct vm_area_struct *vma = vmf->vma;
2584 struct mm_struct *mm = vma->vm_mm;
2585 unsigned long addr = vmf->address;
2586
Jia He83d116c2019-10-11 22:09:39 +08002587 if (likely(src)) {
2588 copy_user_highpage(dst, src, addr, vma);
2589 return true;
2590 }
2591
Linus Torvalds6aab3412005-11-28 14:34:23 -08002592 /*
2593 * If the source page was a PFN mapping, we don't have
2594 * a "struct page" for it. We do a best-effort copy by
2595 * just copying from the original user address. If that
2596 * fails, we just zero-fill it. Live with it.
2597 */
Jia He83d116c2019-10-11 22:09:39 +08002598 kaddr = kmap_atomic(dst);
2599 uaddr = (void __user *)(addr & PAGE_MASK);
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002600
Jia He83d116c2019-10-11 22:09:39 +08002601 /*
2602 * On architectures with software "accessed" bits, we would
2603 * take a double page fault, so mark it accessed here.
2604 */
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002605 if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
Jia He83d116c2019-10-11 22:09:39 +08002606 pte_t entry;
2607
2608 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002609 locked = true;
Jia He83d116c2019-10-11 22:09:39 +08002610 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2611 /*
 2612 * Another thread has already handled the fault;
Bibo Mao7df67692020-05-27 10:25:18 +08002613 * just update the local TLB and bail out.
Jia He83d116c2019-10-11 22:09:39 +08002614 */
Bibo Mao7df67692020-05-27 10:25:18 +08002615 update_mmu_tlb(vma, addr, vmf->pte);
Jia He83d116c2019-10-11 22:09:39 +08002616 ret = false;
2617 goto pte_unlock;
2618 }
2619
2620 entry = pte_mkyoung(vmf->orig_pte);
2621 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2622 update_mmu_cache(vma, addr, vmf->pte);
2623 }
2624
2625 /*
2626 * This really shouldn't fail, because the page is there
2627 * in the page tables. But it might just be unreadable,
2628 * in which case we just give up and fill the result with
2629 * zeroes.
2630 */
2631 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002632 if (locked)
2633 goto warn;
2634
2635 /* Re-validate under PTL if the page is still mapped */
2636 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2637 locked = true;
2638 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
Bibo Mao7df67692020-05-27 10:25:18 +08002639 /* The PTE changed under us, update local tlb */
2640 update_mmu_tlb(vma, addr, vmf->pte);
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002641 ret = false;
2642 goto pte_unlock;
2643 }
2644
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002645 /*
Ethon Paul985ba002020-06-04 16:49:43 -07002646 * The same page may have been mapped back in since the last copy
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002647 * attempt. Try the copy again under the PTL.
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002648 */
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002649 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2650 /*
 2651 * Warn in case some obscure use-case ever
 2652 * hits this path.
2653 */
2654warn:
2655 WARN_ON_ONCE(1);
2656 clear_page(kaddr);
2657 }
Jia He83d116c2019-10-11 22:09:39 +08002658 }
2659
2660 ret = true;
2661
2662pte_unlock:
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002663 if (locked)
Jia He83d116c2019-10-11 22:09:39 +08002664 pte_unmap_unlock(vmf->pte, vmf->ptl);
2665 kunmap_atomic(kaddr);
2666 flush_dcache_page(dst);
2667
2668 return ret;
Linus Torvalds6aab3412005-11-28 14:34:23 -08002669}
2670
Michal Hockoc20cd452016-01-14 15:20:12 -08002671static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2672{
2673 struct file *vm_file = vma->vm_file;
2674
2675 if (vm_file)
2676 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2677
2678 /*
2679 * Special mappings (e.g. VDSO) do not have any file so fake
2680 * a default GFP_KERNEL for them.
2681 */
2682 return GFP_KERNEL;
2683}
2684
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685/*
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002686 * Notify the address space that the page is about to become writable so that
2687 * it can prohibit this or wait for the page to get into an appropriate state.
2688 *
2689 * We do this without the lock held, so that it can sleep if it needs to.
2690 */
Souptick Joarder2b740302018-08-23 17:01:36 -07002691static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002692{
Souptick Joarder2b740302018-08-23 17:01:36 -07002693 vm_fault_t ret;
Jan Kara38b8cb72016-12-14 15:07:30 -08002694 struct page *page = vmf->page;
2695 unsigned int old_flags = vmf->flags;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002696
Jan Kara38b8cb72016-12-14 15:07:30 -08002697 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002698
Darrick J. Wongdc617f22019-08-20 07:55:16 -07002699 if (vmf->vma->vm_file &&
2700 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2701 return VM_FAULT_SIGBUS;
2702
Dave Jiang11bac802017-02-24 14:56:41 -08002703 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
Jan Kara38b8cb72016-12-14 15:07:30 -08002704 /* Restore original flags so that caller is not surprised */
2705 vmf->flags = old_flags;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002706 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2707 return ret;
2708 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2709 lock_page(page);
2710 if (!page->mapping) {
2711 unlock_page(page);
2712 return 0; /* retry */
2713 }
2714 ret |= VM_FAULT_LOCKED;
2715 } else
2716 VM_BUG_ON_PAGE(!PageLocked(page), page);
2717 return ret;
2718}
2719
2720/*
Jan Kara97ba0c22016-12-14 15:07:27 -08002721 * Handle dirtying of a page in shared file mapping on a write fault.
2722 *
2723 * The function expects the page to be locked and unlocks it.
2724 */
Johannes Weiner89b15332019-11-30 17:50:22 -08002725static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
Jan Kara97ba0c22016-12-14 15:07:27 -08002726{
Johannes Weiner89b15332019-11-30 17:50:22 -08002727 struct vm_area_struct *vma = vmf->vma;
Jan Kara97ba0c22016-12-14 15:07:27 -08002728 struct address_space *mapping;
Johannes Weiner89b15332019-11-30 17:50:22 -08002729 struct page *page = vmf->page;
Jan Kara97ba0c22016-12-14 15:07:27 -08002730 bool dirtied;
2731 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2732
2733 dirtied = set_page_dirty(page);
2734 VM_BUG_ON_PAGE(PageAnon(page), page);
2735 /*
2736 * Take a local copy of the address_space - page.mapping may be zeroed
2737 * by truncate after unlock_page(). The address_space itself remains
2738 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
2739 * release semantics to prevent the compiler from undoing this copying.
2740 */
2741 mapping = page_rmapping(page);
2742 unlock_page(page);
2743
Jan Kara97ba0c22016-12-14 15:07:27 -08002744 if (!page_mkwrite)
2745 file_update_time(vma->vm_file);
Johannes Weiner89b15332019-11-30 17:50:22 -08002746
2747 /*
2748 * Throttle page dirtying rate down to writeback speed.
2749 *
2750 * mapping may be NULL here because some device drivers do not
2751 * set page.mapping but still dirty their pages
2752 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002753 * Drop the mmap_lock before waiting on IO, if we can. The file
Johannes Weiner89b15332019-11-30 17:50:22 -08002754 * is pinning the mapping, as per above.
2755 */
2756 if ((dirtied || page_mkwrite) && mapping) {
2757 struct file *fpin;
2758
2759 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2760 balance_dirty_pages_ratelimited(mapping);
2761 if (fpin) {
2762 fput(fpin);
2763 return VM_FAULT_RETRY;
2764 }
2765 }
2766
2767 return 0;
Jan Kara97ba0c22016-12-14 15:07:27 -08002768}
2769
2770/*
Shachar Raindel4e047f82015-04-14 15:46:25 -07002771 * Handle write page faults for pages that can be reused in the current vma
2772 *
2773 * This can happen either due to the mapping being with the VM_SHARED flag,
2774 * or due to us being the last reference standing to the page. In either
2775 * case, all we need to do here is to mark the page as writable and update
2776 * any related book-keeping.
2777 */
Jan Kara997dd982016-12-14 15:07:36 -08002778static inline void wp_page_reuse(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08002779 __releases(vmf->ptl)
Shachar Raindel4e047f82015-04-14 15:46:25 -07002780{
Jan Kara82b0f8c2016-12-14 15:06:58 -08002781 struct vm_area_struct *vma = vmf->vma;
Jan Karaa41b70d2016-12-14 15:07:33 -08002782 struct page *page = vmf->page;
Shachar Raindel4e047f82015-04-14 15:46:25 -07002783 pte_t entry;
2784 /*
 2785 * Clear the page's cpupid information as the existing
2786 * information potentially belongs to a now completely
2787 * unrelated process.
2788 */
2789 if (page)
2790 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2791
Jan Kara29943022016-12-14 15:07:16 -08002792 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2793 entry = pte_mkyoung(vmf->orig_pte);
Shachar Raindel4e047f82015-04-14 15:46:25 -07002794 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -08002795 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2796 update_mmu_cache(vma, vmf->address, vmf->pte);
2797 pte_unmap_unlock(vmf->pte, vmf->ptl);
Peter Xu798a6b82020-08-21 19:49:58 -04002798 count_vm_event(PGREUSE);
Shachar Raindel4e047f82015-04-14 15:46:25 -07002799}
2800
2801/*
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002802 * Handle the case of a page which we actually need to copy to a new page.
2803 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002804 * Called with mmap_lock locked and the old page referenced, but
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002805 * without the ptl held.
2806 *
2807 * High level logic flow:
2808 *
2809 * - Allocate a page, copy the content of the old page to the new one.
2810 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2811 * - Take the PTL. If the pte changed, bail out and release the allocated page
2812 * - If the pte is still the way we remember it, update the page table and all
2813 * relevant references. This includes dropping the reference the page-table
2814 * held to the old page, as well as updating the rmap.
2815 * - In any case, unlock the PTL and drop the reference we took to the old page.
2816 */
Souptick Joarder2b740302018-08-23 17:01:36 -07002817static vm_fault_t wp_page_copy(struct vm_fault *vmf)
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002818{
Jan Kara82b0f8c2016-12-14 15:06:58 -08002819 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07002820 struct mm_struct *mm = vma->vm_mm;
Jan Karaa41b70d2016-12-14 15:07:33 -08002821 struct page *old_page = vmf->page;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002822 struct page *new_page = NULL;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002823 pte_t entry;
2824 int page_copied = 0;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002825 struct mmu_notifier_range range;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002826
2827 if (unlikely(anon_vma_prepare(vma)))
2828 goto oom;
2829
Jan Kara29943022016-12-14 15:07:16 -08002830 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08002831 new_page = alloc_zeroed_user_highpage_movable(vma,
2832 vmf->address);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002833 if (!new_page)
2834 goto oom;
2835 } else {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07002836 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
Jan Kara82b0f8c2016-12-14 15:06:58 -08002837 vmf->address);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002838 if (!new_page)
2839 goto oom;
Jia He83d116c2019-10-11 22:09:39 +08002840
2841 if (!cow_user_page(new_page, old_page, vmf)) {
2842 /*
 2843 * COW failed; if the fault was resolved by another
 2844 * thread, that's fine. If not, userspace will re-fault
 2845 * on the same address and we will handle the fault
 2846 * on the second attempt.
2847 */
2848 put_page(new_page);
2849 if (old_page)
2850 put_page(old_page);
2851 return 0;
2852 }
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002853 }
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002854
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07002855 if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002856 goto oom_free_new;
Johannes Weiner9d82c692020-06-03 16:02:04 -07002857 cgroup_throttle_swaprate(new_page, GFP_KERNEL);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002858
Mel Gormaneb3c24f2015-06-24 16:57:27 -07002859 __SetPageUptodate(new_page);
2860
Jérôme Glisse7269f992019-05-13 17:20:53 -07002861 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07002862 vmf->address & PAGE_MASK,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002863 (vmf->address & PAGE_MASK) + PAGE_SIZE);
2864 mmu_notifier_invalidate_range_start(&range);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002865
2866 /*
2867 * Re-check the pte - we dropped the lock
2868 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08002869 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
Jan Kara29943022016-12-14 15:07:16 -08002870 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002871 if (old_page) {
2872 if (!PageAnon(old_page)) {
Jerome Marchandeca56ff2016-01-14 15:19:26 -08002873 dec_mm_counter_fast(mm,
2874 mm_counter_file(old_page));
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002875 inc_mm_counter_fast(mm, MM_ANONPAGES);
2876 }
2877 } else {
2878 inc_mm_counter_fast(mm, MM_ANONPAGES);
2879 }
Jan Kara29943022016-12-14 15:07:16 -08002880 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002881 entry = mk_pte(new_page, vma->vm_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08002882 entry = pte_sw_mkyoung(entry);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002883 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2884 /*
2885 * Clear the pte entry and flush it first, before updating the
2886 * pte with the new entry. This will avoid a race condition
2887 * seen in the presence of one thread doing SMC and another
2888 * thread doing COW.
2889 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08002890 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2891 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07002892 lru_cache_add_inactive_or_unevictable(new_page, vma);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002893 /*
2894 * We call the notify macro here because, when using secondary
2895 * mmu page tables (such as kvm shadow page tables), we want the
2896 * new page to be mapped directly into the secondary page table.
2897 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08002898 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2899 update_mmu_cache(vma, vmf->address, vmf->pte);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002900 if (old_page) {
2901 /*
2902 * Only after switching the pte to the new page may
2903 * we remove the mapcount here. Otherwise another
2904 * process may come and find the rmap count decremented
2905 * before the pte is switched to the new page, and
2906 * "reuse" the old page writing into it while our pte
2907 * here still points into it and can be read by other
2908 * threads.
2909 *
2910 * The critical issue is to order this
 2911 * page_remove_rmap with the ptep_clear_flush above.
2912 * Those stores are ordered by (if nothing else,)
2913 * the barrier present in the atomic_add_negative
2914 * in page_remove_rmap.
2915 *
2916 * Then the TLB flush in ptep_clear_flush ensures that
2917 * no process can access the old page before the
2918 * decremented mapcount is visible. And the old page
2919 * cannot be reused until after the decremented
2920 * mapcount is visible. So transitively, TLBs to
2921 * old page will be flushed before it can be reused.
2922 */
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002923 page_remove_rmap(old_page, false);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002924 }
2925
2926 /* Free the old page.. */
2927 new_page = old_page;
2928 page_copied = 1;
2929 } else {
Bibo Mao7df67692020-05-27 10:25:18 +08002930 update_mmu_tlb(vma, vmf->address, vmf->pte);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002931 }
2932
2933 if (new_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002934 put_page(new_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002935
Jan Kara82b0f8c2016-12-14 15:06:58 -08002936 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002937 /*
2938 * No need to double call mmu_notifier->invalidate_range() callback as
2939 * the above ptep_clear_flush_notify() did already call it.
2940 */
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002941 mmu_notifier_invalidate_range_only_end(&range);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002942 if (old_page) {
2943 /*
2944 * Don't let another task, with possibly unlocked vma,
2945 * keep the mlocked page.
2946 */
2947 if (page_copied && (vma->vm_flags & VM_LOCKED)) {
2948 lock_page(old_page); /* LRU manipulation */
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08002949 if (PageMlocked(old_page))
2950 munlock_vma_page(old_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002951 unlock_page(old_page);
2952 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002953 put_page(old_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002954 }
2955 return page_copied ? VM_FAULT_WRITE : 0;
2956oom_free_new:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002957 put_page(new_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002958oom:
2959 if (old_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002960 put_page(old_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002961 return VM_FAULT_OOM;
2962}
2963
Jan Kara66a61972016-12-14 15:07:39 -08002964/**
2965 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
2966 * writeable once the page is prepared
2967 *
2968 * @vmf: structure describing the fault
2969 *
2970 * This function handles all that is needed to finish a write page fault in a
2971 * shared mapping due to PTE being read-only once the mapped page is prepared.
Mike Rapoporta862f682019-03-05 15:48:42 -08002972 * It handles locking of the PTE and modifying it.
Jan Kara66a61972016-12-14 15:07:39 -08002973 *
2974 * The function expects the page to be locked or other protection against
2975 * concurrent faults / writeback (such as DAX radix tree locks).
Mike Rapoporta862f682019-03-05 15:48:42 -08002976 *
2977 * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
2978 * we acquired PTE lock.
Jan Kara66a61972016-12-14 15:07:39 -08002979 */
Souptick Joarder2b740302018-08-23 17:01:36 -07002980vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
Jan Kara66a61972016-12-14 15:07:39 -08002981{
2982 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
2983 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
2984 &vmf->ptl);
2985 /*
2986 * We might have raced with another page fault while we released the
2987 * pte_offset_map_lock.
2988 */
2989 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
Bibo Mao7df67692020-05-27 10:25:18 +08002990 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
Jan Kara66a61972016-12-14 15:07:39 -08002991 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa19e2552016-12-14 15:07:42 -08002992 return VM_FAULT_NOPAGE;
Jan Kara66a61972016-12-14 15:07:39 -08002993 }
2994 wp_page_reuse(vmf);
Jan Karaa19e2552016-12-14 15:07:42 -08002995 return 0;
Jan Kara66a61972016-12-14 15:07:39 -08002996}
2997
Boaz Harroshdd906182015-04-15 16:15:11 -07002998/*
2999 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3000 * mapping
3001 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003002static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
Boaz Harroshdd906182015-04-15 16:15:11 -07003003{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003004 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003005
Boaz Harroshdd906182015-04-15 16:15:11 -07003006 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
Souptick Joarder2b740302018-08-23 17:01:36 -07003007 vm_fault_t ret;
Boaz Harroshdd906182015-04-15 16:15:11 -07003008
Jan Kara82b0f8c2016-12-14 15:06:58 -08003009 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karafe822212016-12-14 15:07:13 -08003010 vmf->flags |= FAULT_FLAG_MKWRITE;
Dave Jiang11bac802017-02-24 14:56:41 -08003011 ret = vma->vm_ops->pfn_mkwrite(vmf);
Jan Kara2f89dc12016-12-14 15:07:50 -08003012 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
Boaz Harroshdd906182015-04-15 16:15:11 -07003013 return ret;
Jan Kara66a61972016-12-14 15:07:39 -08003014 return finish_mkwrite_fault(vmf);
Boaz Harroshdd906182015-04-15 16:15:11 -07003015 }
Jan Kara997dd982016-12-14 15:07:36 -08003016 wp_page_reuse(vmf);
3017 return VM_FAULT_WRITE;
Boaz Harroshdd906182015-04-15 16:15:11 -07003018}
3019
Souptick Joarder2b740302018-08-23 17:01:36 -07003020static vm_fault_t wp_page_shared(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08003021 __releases(vmf->ptl)
Shachar Raindel93e478d2015-04-14 15:46:35 -07003022{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003023 struct vm_area_struct *vma = vmf->vma;
Johannes Weiner89b15332019-11-30 17:50:22 -08003024 vm_fault_t ret = VM_FAULT_WRITE;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003025
Jan Karaa41b70d2016-12-14 15:07:33 -08003026 get_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003027
Shachar Raindel93e478d2015-04-14 15:46:35 -07003028 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
Souptick Joarder2b740302018-08-23 17:01:36 -07003029 vm_fault_t tmp;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003030
Jan Kara82b0f8c2016-12-14 15:06:58 -08003031 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Kara38b8cb72016-12-14 15:07:30 -08003032 tmp = do_page_mkwrite(vmf);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003033 if (unlikely(!tmp || (tmp &
3034 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003035 put_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003036 return tmp;
3037 }
Jan Kara66a61972016-12-14 15:07:39 -08003038 tmp = finish_mkwrite_fault(vmf);
Jan Karaa19e2552016-12-14 15:07:42 -08003039 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003040 unlock_page(vmf->page);
Jan Karaa41b70d2016-12-14 15:07:33 -08003041 put_page(vmf->page);
Jan Kara66a61972016-12-14 15:07:39 -08003042 return tmp;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003043 }
Jan Kara66a61972016-12-14 15:07:39 -08003044 } else {
3045 wp_page_reuse(vmf);
Jan Kara997dd982016-12-14 15:07:36 -08003046 lock_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003047 }
Johannes Weiner89b15332019-11-30 17:50:22 -08003048 ret |= fault_dirty_shared_page(vmf);
Jan Kara997dd982016-12-14 15:07:36 -08003049 put_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003050
Johannes Weiner89b15332019-11-30 17:50:22 -08003051 return ret;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003052}
3053
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003054/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 * This routine handles present pages, when users try to write
3056 * to a shared page. It is done by copying the page to a new address
3057 * and decrementing the shared-page counter for the old page.
3058 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 * Note that this routine assumes that the protection checks have been
3060 * done by the caller (the low-level page fault routine in most cases).
3061 * Thus we can safely just mark it writable once we've done any necessary
3062 * COW.
3063 *
3064 * We also mark the page dirty at this point even though the page will
3065 * change only once the write actually happens. This avoids a few races,
3066 * and potentially makes it more efficient.
3067 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003068 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003069 * but allow concurrent faults), with pte both mapped and locked.
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003070 * We return with mmap_lock still held, but pte unmapped and unlocked.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003072static vm_fault_t do_wp_page(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08003073 __releases(vmf->ptl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003075 struct vm_area_struct *vma = vmf->vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076
Peter Xu292924b2020-04-06 20:05:49 -07003077 if (userfaultfd_pte_wp(vma, *vmf->pte)) {
Andrea Arcangeli529b9302020-04-06 20:05:29 -07003078 pte_unmap_unlock(vmf->pte, vmf->ptl);
3079 return handle_userfault(vmf, VM_UFFD_WP);
3080 }
3081
Jan Karaa41b70d2016-12-14 15:07:33 -08003082 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3083 if (!vmf->page) {
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003084 /*
Peter Feiner64e455072014-10-13 15:55:46 -07003085 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3086 * VM_PFNMAP VMA.
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003087 *
3088 * We should not cow pages in a shared writeable mapping.
Boaz Harroshdd906182015-04-15 16:15:11 -07003089 * Just mark the pages writable and/or call ops->pfn_mkwrite.
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003090 */
3091 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3092 (VM_WRITE|VM_SHARED))
Jan Kara29943022016-12-14 15:07:16 -08003093 return wp_pfn_shared(vmf);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003094
Jan Kara82b0f8c2016-12-14 15:06:58 -08003095 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa41b70d2016-12-14 15:07:33 -08003096 return wp_page_copy(vmf);
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003097 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003099 /*
Peter Zijlstraee6a6452006-09-25 23:31:00 -07003100 * Take out anonymous pages first, anonymous shared vmas are
3101 * not dirty accountable.
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003102 */
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003103 if (PageAnon(vmf->page)) {
Linus Torvalds09854ba2020-08-21 19:49:55 -04003104 struct page *page = vmf->page;
3105
3106 /* PageKsm() doesn't necessarily raise the page refcount */
3107 if (PageKsm(page) || page_count(page) != 1)
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003108 goto copy;
Linus Torvalds09854ba2020-08-21 19:49:55 -04003109 if (!trylock_page(page))
3110 goto copy;
3111 if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3112 unlock_page(page);
3113 goto copy;
Peter Zijlstraee6a6452006-09-25 23:31:00 -07003114 }
Linus Torvalds09854ba2020-08-21 19:49:55 -04003115 /*
3116 * Ok, we've got the only map reference, and the only
3117 * page count reference, and the page is locked,
3118 * it's dark out, and we're wearing sunglasses. Hit it.
3119 */
Linus Torvalds09854ba2020-08-21 19:49:55 -04003120 unlock_page(page);
Linus Torvaldsbe068f22020-09-24 08:41:32 -07003121 wp_page_reuse(vmf);
Linus Torvalds09854ba2020-08-21 19:49:55 -04003122 return VM_FAULT_WRITE;
Peter Zijlstraee6a6452006-09-25 23:31:00 -07003123 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003124 (VM_WRITE|VM_SHARED))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003125 return wp_page_shared(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 }
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003127copy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128 /*
3129 * Ok, we need to copy. Oh, well..
3130 */
Jan Karaa41b70d2016-12-14 15:07:33 -08003131 get_page(vmf->page);
Shachar Raindel28766802015-04-14 15:46:29 -07003132
Jan Kara82b0f8c2016-12-14 15:06:58 -08003133 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa41b70d2016-12-14 15:07:33 -08003134 return wp_page_copy(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135}
3136
Peter Zijlstra97a89412011-05-24 17:12:04 -07003137static void unmap_mapping_range_vma(struct vm_area_struct *vma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138 unsigned long start_addr, unsigned long end_addr,
3139 struct zap_details *details)
3140{
Al Virof5cc4ee2012-03-05 14:14:20 -05003141 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142}
3143
Davidlohr Buesof808c132017-09-08 16:15:08 -07003144static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 struct zap_details *details)
3146{
3147 struct vm_area_struct *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148 pgoff_t vba, vea, zba, zea;
3149
Michel Lespinasse6b2dbba2012-10-08 16:31:25 -07003150 vma_interval_tree_foreach(vma, root,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 details->first_index, details->last_index) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152
3153 vba = vma->vm_pgoff;
Libind6e93212013-07-03 15:01:26 -07003154 vea = vba + vma_pages(vma) - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 zba = details->first_index;
3156 if (zba < vba)
3157 zba = vba;
3158 zea = details->last_index;
3159 if (zea > vea)
3160 zea = vea;
3161
Peter Zijlstra97a89412011-05-24 17:12:04 -07003162 unmap_mapping_range_vma(vma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3164 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
Peter Zijlstra97a89412011-05-24 17:12:04 -07003165 details);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 }
3167}
3168
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169/**
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003170 * unmap_mapping_pages() - Unmap pages from processes.
3171 * @mapping: The address space containing pages to be unmapped.
3172 * @start: Index of first page to be unmapped.
3173 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3174 * @even_cows: Whether to unmap even private COWed pages.
3175 *
3176 * Unmap the pages in this address space from any userspace process which
3177 * has them mmaped. Generally, you want to remove COWed pages as well when
3178 * a file is being truncated, but not when invalidating pages from the page
3179 * cache.
3180 */
3181void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3182 pgoff_t nr, bool even_cows)
3183{
3184 struct zap_details details = { };
3185
3186 details.check_mapping = even_cows ? NULL : mapping;
3187 details.first_index = start;
3188 details.last_index = start + nr - 1;
3189 if (details.last_index < details.first_index)
3190 details.last_index = ULONG_MAX;
3191
3192 i_mmap_lock_write(mapping);
3193 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3194 unmap_mapping_range_tree(&mapping->i_mmap, &details);
3195 i_mmap_unlock_write(mapping);
3196}
3197
3198/**
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08003199 * unmap_mapping_range - unmap the portion of all mmaps in the specified
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003200 * address_space corresponding to the specified byte range in the underlying
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08003201 * file.
3202 *
Martin Waitz3d410882005-06-23 22:05:21 -07003203 * @mapping: the address space containing mmaps to be unmapped.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204 * @holebegin: byte in first page to unmap, relative to the start of
3205 * the underlying file. This will be rounded down to a PAGE_SIZE
npiggin@suse.de25d9e2d2009-08-21 02:35:05 +10003206 * boundary. Note that this is different from truncate_pagecache(), which
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 * must keep the partial page. In contrast, we must get rid of
3208 * partial pages.
3209 * @holelen: size of prospective hole in bytes. This will be rounded
3210 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3211 * end of the file.
3212 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3213 * but 0 when invalidating pagecache, don't throw away private data.
3214 */
3215void unmap_mapping_range(struct address_space *mapping,
3216 loff_t const holebegin, loff_t const holelen, int even_cows)
3217{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218 pgoff_t hba = holebegin >> PAGE_SHIFT;
3219 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3220
3221 /* Check for overflow. */
3222 if (sizeof(holelen) > sizeof(hlen)) {
3223 long long holeend =
3224 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3225 if (holeend & ~(long long)ULONG_MAX)
3226 hlen = ULONG_MAX - hba + 1;
3227 }
3228
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003229 unmap_mapping_pages(mapping, hba, hlen, even_cows);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230}
3231EXPORT_SYMBOL(unmap_mapping_range);
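/*
 * Illustrative sketch (not part of this file): a filesystem shrinking an
 * inode typically unmaps the affected byte range before and after dropping
 * the page cache, comparable to what truncate_pagecache() does; newsize is
 * the hypothetical new file size.
 *
 *	loff_t holebegin = round_up(newsize, PAGE_SIZE);
 *
 *	unmap_mapping_range(mapping, holebegin, 0, 1);
 *	truncate_inode_pages(mapping, newsize);
 *	unmap_mapping_range(mapping, holebegin, 0, 1);
 *
 * The second call is needed because private COWed pages can be created
 * between the first unmap and the completion of truncate_inode_pages().
 */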
3232
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003234 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003235 * but allow concurrent faults), and pte mapped but not yet locked.
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003236 * We return with pte unmapped and unlocked.
3237 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003238 * We return with the mmap_lock locked or unlocked in the same cases
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003239 * as does filemap_fault().
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003241vm_fault_t do_swap_page(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003243 struct vm_area_struct *vma = vmf->vma;
Minchan Kimeaf649eb2018-04-05 16:23:39 -07003244 struct page *page = NULL, *swapcache;
Hugh Dickins65500d22005-10-29 18:15:59 -07003245 swp_entry_t entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 pte_t pte;
Michel Lespinassed065bd82010-10-26 14:21:57 -07003247 int locked;
Rik van Rielad8c2ee2010-08-09 17:19:48 -07003248 int exclusive = 0;
Souptick Joarder2b740302018-08-23 17:01:36 -07003249 vm_fault_t ret = 0;
Joonsoo Kimaae466b2020-08-11 18:30:50 -07003250 void *shadow = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251
Minchan Kimeaf649eb2018-04-05 16:23:39 -07003252 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003253 goto out;
Hugh Dickins65500d22005-10-29 18:15:59 -07003254
Jan Kara29943022016-12-14 15:07:16 -08003255 entry = pte_to_swp_entry(vmf->orig_pte);
Andi Kleend1737fd2009-09-16 11:50:06 +02003256 if (unlikely(non_swap_entry(entry))) {
3257 if (is_migration_entry(entry)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003258 migration_entry_wait(vma->vm_mm, vmf->pmd,
3259 vmf->address);
Jérôme Glisse5042db42017-09-08 16:11:43 -07003260 } else if (is_device_private_entry(entry)) {
Christoph Hellwig897e6362019-06-26 14:27:11 +02003261 vmf->page = device_private_entry_to_page(entry);
3262 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
Andi Kleend1737fd2009-09-16 11:50:06 +02003263 } else if (is_hwpoison_entry(entry)) {
3264 ret = VM_FAULT_HWPOISON;
3265 } else {
Jan Kara29943022016-12-14 15:07:16 -08003266 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
Hugh Dickinsd99be1a2009-12-14 17:59:04 -08003267 ret = VM_FAULT_SIGBUS;
Andi Kleend1737fd2009-09-16 11:50:06 +02003268 }
Christoph Lameter06972122006-06-23 02:03:35 -07003269 goto out;
3270 }
Minchan Kim0bcac062017-11-15 17:33:07 -08003271
3272
Shailabh Nagar0ff92242006-07-14 00:24:37 -07003273 delayacct_set_flag(DELAYACCT_PF_SWAPIN);
Minchan Kimeaf649eb2018-04-05 16:23:39 -07003274 page = lookup_swap_cache(entry, vma, vmf->address);
3275 swapcache = page;
Minchan Kimf8020772018-01-18 16:33:50 -08003276
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277 if (!page) {
Minchan Kim0bcac062017-11-15 17:33:07 -08003278 struct swap_info_struct *si = swp_swap_info(entry);
3279
Qian Caia449bf52020-08-14 17:31:31 -07003280 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3281 __swap_count(entry) == 1) {
Minchan Kim0bcac062017-11-15 17:33:07 -08003282 /* skip swapcache */
Minchan Kime9e9b7e2018-04-05 16:23:42 -07003283 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3284 vmf->address);
Minchan Kim0bcac062017-11-15 17:33:07 -08003285 if (page) {
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003286 int err;
3287
Minchan Kim0bcac062017-11-15 17:33:07 -08003288 __SetPageLocked(page);
3289 __SetPageSwapBacked(page);
3290 set_page_private(page, entry.val);
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003291
3292 /* Tell memcg to use swap ownership records */
3293 SetPageSwapCache(page);
3294 err = mem_cgroup_charge(page, vma->vm_mm,
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07003295 GFP_KERNEL);
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003296 ClearPageSwapCache(page);
Michal Hocko545b1b02020-06-25 20:29:21 -07003297 if (err) {
3298 ret = VM_FAULT_OOM;
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003299 goto out_page;
Michal Hocko545b1b02020-06-25 20:29:21 -07003300 }
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003301
Joonsoo Kimaae466b2020-08-11 18:30:50 -07003302 shadow = get_shadow_from_swap_cache(entry);
3303 if (shadow)
3304 workingset_refault(page, shadow);
Joonsoo Kim0076f022020-06-25 20:30:37 -07003305
Johannes Weiner6058eae2020-06-03 16:02:40 -07003306 lru_cache_add(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003307 swap_readpage(page, true);
3308 }
Minchan Kimaa8d22a2017-11-15 17:33:11 -08003309 } else {
Minchan Kime9e9b7e2018-04-05 16:23:42 -07003310 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3311 vmf);
Minchan Kimaa8d22a2017-11-15 17:33:11 -08003312 swapcache = page;
Minchan Kim0bcac062017-11-15 17:33:07 -08003313 }
3314
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 if (!page) {
3316 /*
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003317 * Back out if somebody else faulted in this pte
3318 * while we released the pte lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003320 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3321 vmf->address, &vmf->ptl);
Jan Kara29943022016-12-14 15:07:16 -08003322 if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 ret = VM_FAULT_OOM;
Shailabh Nagar0ff92242006-07-14 00:24:37 -07003324 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Hugh Dickins65500d22005-10-29 18:15:59 -07003325 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 }
3327
3328 /* Had to read the page from swap area: Major fault */
3329 ret = VM_FAULT_MAJOR;
Christoph Lameterf8891e52006-06-30 01:55:45 -07003330 count_vm_event(PGMAJFAULT);
Roman Gushchin22621852017-07-06 15:40:25 -07003331 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
Andi Kleend1737fd2009-09-16 11:50:06 +02003332 } else if (PageHWPoison(page)) {
Wu Fengguang71f72522009-12-16 12:19:58 +01003333 /*
3334 * hwpoisoned dirty swapcache pages are kept for killing
3335 * owner processes (which may be unknown at hwpoison time)
3336 */
Andi Kleend1737fd2009-09-16 11:50:06 +02003337 ret = VM_FAULT_HWPOISON;
3338 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Andi Kleen4779cb32009-10-14 01:51:41 +02003339 goto out_release;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 }
3341
Jan Kara82b0f8c2016-12-14 15:06:58 -08003342 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
Rik van Riele709ffd2012-05-29 15:06:18 -07003343
Balbir Singh20a10222007-11-14 17:00:33 -08003344 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Michel Lespinassed065bd82010-10-26 14:21:57 -07003345 if (!locked) {
3346 ret |= VM_FAULT_RETRY;
3347 goto out_release;
3348 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003350 /*
Hugh Dickins31c4a3d2010-09-19 19:40:22 -07003351 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3352 * release the swapcache from under us. The page pin, and pte_same
3353 * test below, are not enough to exclude that. Even if it is still
3354 * swapcache, we need to check that the page's swap has not changed.
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003355 */
Minchan Kim0bcac062017-11-15 17:33:07 -08003356 if (unlikely((!PageSwapCache(page) ||
3357 page_private(page) != entry.val)) && swapcache)
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003358 goto out_page;
3359
Jan Kara82b0f8c2016-12-14 15:06:58 -08003360 page = ksm_might_need_to_copy(page, vma, vmf->address);
Hugh Dickinscbf86cf2013-02-22 16:35:08 -08003361 if (unlikely(!page)) {
3362 ret = VM_FAULT_OOM;
3363 page = swapcache;
Hugh Dickinscbf86cf2013-02-22 16:35:08 -08003364 goto out_page;
Hugh Dickins5ad64682009-12-14 17:59:24 -08003365 }
3366
Johannes Weiner9d82c692020-06-03 16:02:04 -07003367 cgroup_throttle_swaprate(page, GFP_KERNEL);
KAMEZAWA Hiroyuki073e5872008-10-18 20:28:08 -07003368
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 /*
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003370 * Back out if somebody else already faulted in this pte.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003372 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3373 &vmf->ptl);
Jan Kara29943022016-12-14 15:07:16 -08003374 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
Kirill Korotaevb8107482005-05-16 21:53:50 -07003375 goto out_nomap;
Kirill Korotaevb8107482005-05-16 21:53:50 -07003376
3377 if (unlikely(!PageUptodate(page))) {
3378 ret = VM_FAULT_SIGBUS;
3379 goto out_nomap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 }
3381
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003382 /*
3383 * The page isn't present yet, go ahead with the fault.
3384 *
3385 * Be careful about the sequence of operations here.
3386 * To get its accounting right, reuse_swap_page() must be called
3387 * while the page is counted on swap but not yet in mapcount i.e.
3388 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3389 * must be called after the swap_free(), or it will never succeed.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003390 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003392 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3393 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 pte = mk_pte(page, vma->vm_page_prot);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003395 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003397 vmf->flags &= ~FAULT_FLAG_WRITE;
Andrea Arcangeli9a5b4892010-08-09 17:19:49 -07003398 ret |= VM_FAULT_WRITE;
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08003399 exclusive = RMAP_EXCLUSIVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401 flush_icache_page(vma, page);
Jan Kara29943022016-12-14 15:07:16 -08003402 if (pte_swp_soft_dirty(vmf->orig_pte))
Cyrill Gorcunov179ef712013-08-13 16:00:49 -07003403 pte = pte_mksoft_dirty(pte);
Peter Xuf45ec5f2020-04-06 20:06:01 -07003404 if (pte_swp_uffd_wp(vmf->orig_pte)) {
3405 pte = pte_mkuffd_wp(pte);
3406 pte = pte_wrprotect(pte);
3407 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08003408 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
Khalid Azizca827d52018-02-21 10:15:44 -07003409 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
Jan Kara29943022016-12-14 15:07:16 -08003410 vmf->orig_pte = pte;
Minchan Kim0bcac062017-11-15 17:33:07 -08003411
3412 /* ksm created a completely new copy */
3413 if (unlikely(page != swapcache && swapcache)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003414 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003415 lru_cache_add_inactive_or_unevictable(page, vma);
Minchan Kim0bcac062017-11-15 17:33:07 -08003416 } else {
3417 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
Johannes Weiner00501b52014-08-08 14:19:20 -07003418 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003420 swap_free(entry);
Vladimir Davydov5ccc5ab2016-01-20 15:03:10 -08003421 if (mem_cgroup_swap_full(page) ||
3422 (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
Hugh Dickinsa2c43ee2009-01-06 14:39:36 -08003423 try_to_free_swap(page);
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003424 unlock_page(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003425 if (page != swapcache && swapcache) {
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003426 /*
 3427 * Hold the lock to prevent the swap entry from being reused
 3428 * until we take the PT lock for the pte_same() check
 3429 * (to avoid false positives from pte_same). For
 3430 * further safety, release the lock after the swap_free()
 3431 * so that the swap count won't change under a
 3432 * parallel locked swapcache.
3433 */
3434 unlock_page(swapcache);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003435 put_page(swapcache);
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003436 }
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003437
Jan Kara82b0f8c2016-12-14 15:06:58 -08003438 if (vmf->flags & FAULT_FLAG_WRITE) {
Jan Kara29943022016-12-14 15:07:16 -08003439 ret |= do_wp_page(vmf);
Hugh Dickins61469f12008-03-04 14:29:04 -08003440 if (ret & VM_FAULT_ERROR)
3441 ret &= VM_FAULT_ERROR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 goto out;
3443 }
3444
3445 /* No need to invalidate - it was non-present before */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003446 update_mmu_cache(vma, vmf->address, vmf->pte);
Hugh Dickins65500d22005-10-29 18:15:59 -07003447unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003448 pte_unmap_unlock(vmf->pte, vmf->ptl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449out:
3450 return ret;
Kirill Korotaevb8107482005-05-16 21:53:50 -07003451out_nomap:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003452 pte_unmap_unlock(vmf->pte, vmf->ptl);
Johannes Weinerbc43f752009-04-30 15:08:08 -07003453out_page:
Kirill Korotaevb8107482005-05-16 21:53:50 -07003454 unlock_page(page);
Andi Kleen4779cb32009-10-14 01:51:41 +02003455out_release:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003456 put_page(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003457 if (page != swapcache && swapcache) {
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003458 unlock_page(swapcache);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003459 put_page(swapcache);
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003460 }
Hugh Dickins65500d22005-10-29 18:15:59 -07003461 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462}
3463
3464/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003465 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003466 * but allow concurrent faults), and pte mapped but not yet locked.
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003467 * We return with mmap_lock still held, but pte unmapped and unlocked.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003469static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003471 struct vm_area_struct *vma = vmf->vma;
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003472 struct page *page;
Souptick Joarder2b740302018-08-23 17:01:36 -07003473 vm_fault_t ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474 pte_t entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475
Kirill A. Shutemov6b7339f2015-07-06 23:18:37 +03003476 /* File mapping without ->vm_ops ? */
3477 if (vma->vm_flags & VM_SHARED)
3478 return VM_FAULT_SIGBUS;
3479
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003480 /*
3481 * Use pte_alloc() instead of pte_alloc_map(). We can't run
3482 * pte_offset_map() on pmds where a huge pmd might be created
3483 * from a different thread.
3484 *
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07003485 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003486 * parallel threads are excluded by other means.
3487 *
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07003488 * Here we only have mmap_read_lock(mm).
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003489 */
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003490 if (pte_alloc(vma->vm_mm, vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003491 return VM_FAULT_OOM;
3492
3493 /* See the comment in pte_alloc_one_map() */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003494 if (unlikely(pmd_trans_unstable(vmf->pmd)))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003495 return 0;
3496
Linus Torvalds11ac5522010-08-14 11:44:56 -07003497 /* Use the zero-page for reads */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003498 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003499 !mm_forbids_zeropage(vma->vm_mm)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003500 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
Hugh Dickins62eede62009-09-21 17:03:34 -07003501 vma->vm_page_prot));
Jan Kara82b0f8c2016-12-14 15:06:58 -08003502 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3503 vmf->address, &vmf->ptl);
Bibo Mao7df67692020-05-27 10:25:18 +08003504 if (!pte_none(*vmf->pte)) {
3505 update_mmu_tlb(vma, vmf->address, vmf->pte);
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003506 goto unlock;
Bibo Mao7df67692020-05-27 10:25:18 +08003507 }
Michal Hocko6b31d592017-08-18 15:16:15 -07003508 ret = check_stable_address_space(vma->vm_mm);
3509 if (ret)
3510 goto unlock;
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003511 /* Deliver the page fault to userland, check inside PT lock */
3512 if (userfaultfd_missing(vma)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003513 pte_unmap_unlock(vmf->pte, vmf->ptl);
3514 return handle_userfault(vmf, VM_UFFD_MISSING);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003515 }
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003516 goto setpte;
3517 }
3518
Nick Piggin557ed1f2007-10-16 01:24:40 -07003519 /* Allocate our own private page. */
Nick Piggin557ed1f2007-10-16 01:24:40 -07003520 if (unlikely(anon_vma_prepare(vma)))
3521 goto oom;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003522 page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
Nick Piggin557ed1f2007-10-16 01:24:40 -07003523 if (!page)
3524 goto oom;
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003525
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07003526 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003527 goto oom_free_page;
Johannes Weiner9d82c692020-06-03 16:02:04 -07003528 cgroup_throttle_swaprate(page, GFP_KERNEL);
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003529
Minchan Kim52f37622013-04-29 15:08:15 -07003530 /*
3531 * The memory barrier inside __SetPageUptodate makes sure that
Wei Yangf4f53292019-11-30 17:58:17 -08003532 * preceding stores to the page contents become visible before
Minchan Kim52f37622013-04-29 15:08:15 -07003533 * the set_pte_at() write.
3534 */
Nick Piggin0ed361d2008-02-04 22:29:34 -08003535 __SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536
Nick Piggin557ed1f2007-10-16 01:24:40 -07003537 entry = mk_pte(page, vma->vm_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08003538 entry = pte_sw_mkyoung(entry);
Hugh Dickins1ac0cb52009-09-21 17:03:29 -07003539 if (vma->vm_flags & VM_WRITE)
3540 entry = pte_mkwrite(pte_mkdirty(entry));
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003541
Jan Kara82b0f8c2016-12-14 15:06:58 -08003542 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3543 &vmf->ptl);
Bibo Mao7df67692020-05-27 10:25:18 +08003544 if (!pte_none(*vmf->pte)) {
3545 update_mmu_cache(vma, vmf->address, vmf->pte);
Nick Piggin557ed1f2007-10-16 01:24:40 -07003546 goto release;
Bibo Mao7df67692020-05-27 10:25:18 +08003547 }
Hugh Dickins9ba69292009-09-21 17:02:20 -07003548
Michal Hocko6b31d592017-08-18 15:16:15 -07003549 ret = check_stable_address_space(vma->vm_mm);
3550 if (ret)
3551 goto release;
3552
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003553 /* Deliver the page fault to userland, check inside PT lock */
3554 if (userfaultfd_missing(vma)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003555 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003556 put_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003557 return handle_userfault(vmf, VM_UFFD_MISSING);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003558 }
3559
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003560 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003561 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003562 lru_cache_add_inactive_or_unevictable(page, vma);
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003563setpte:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003564 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565
3566 /* No need to invalidate - it was non-present before */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003567 update_mmu_cache(vma, vmf->address, vmf->pte);
Hugh Dickins65500d22005-10-29 18:15:59 -07003568unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003569 pte_unmap_unlock(vmf->pte, vmf->ptl);
Michal Hocko6b31d592017-08-18 15:16:15 -07003570 return ret;
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003571release:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003572 put_page(page);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003573 goto unlock;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003574oom_free_page:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003575 put_page(page);
Hugh Dickins65500d22005-10-29 18:15:59 -07003576oom:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 return VM_FAULT_OOM;
3578}
3579
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003580/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003581 * The mmap_lock must have been held on entry, and may have been
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003582 * released depending on flags and vma->vm_ops->fault() return value.
3583 * See filemap_fault() and __lock_page_or_retry().
3584 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003585static vm_fault_t __do_fault(struct vm_fault *vmf)
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003586{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003587 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07003588 vm_fault_t ret;
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003589
Michal Hocko63f36552019-01-08 15:23:07 -08003590 /*
3591 * Preallocate pte before we take page_lock because this might lead to
3592 * deadlocks for memcg reclaim which waits for pages under writeback:
3593 * lock_page(A)
3594 * SetPageWriteback(A)
3595 * unlock_page(A)
3596 * lock_page(B)
3597 * lock_page(B)
Yanfei Xud3838072020-10-13 16:53:26 -07003598 * pte_alloc_one
Michal Hocko63f36552019-01-08 15:23:07 -08003599 * shrink_page_list
3600 * wait_on_page_writeback(A)
3601 * SetPageWriteback(B)
3602 * unlock_page(B)
3603 * # flush A, B to clear the writeback
3604 */
3605 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
Yanfei Xua7069ee2020-10-13 16:53:29 -07003606 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
Michal Hocko63f36552019-01-08 15:23:07 -08003607 if (!vmf->prealloc_pte)
3608 return VM_FAULT_OOM;
3609 smp_wmb(); /* See comment in __pte_alloc() */
3610 }
3611
Dave Jiang11bac802017-02-24 14:56:41 -08003612 ret = vma->vm_ops->fault(vmf);
Jan Kara39170482016-12-14 15:07:18 -08003613 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
Jan Karab1aa8122016-12-14 15:07:24 -08003614 VM_FAULT_DONE_COW)))
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003615 return ret;
3616
Jan Kara667240e2016-12-14 15:07:07 -08003617 if (unlikely(PageHWPoison(vmf->page))) {
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003618 if (ret & VM_FAULT_LOCKED)
Jan Kara667240e2016-12-14 15:07:07 -08003619 unlock_page(vmf->page);
3620 put_page(vmf->page);
Jan Kara936ca802016-12-14 15:07:10 -08003621 vmf->page = NULL;
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003622 return VM_FAULT_HWPOISON;
3623 }
3624
3625 if (unlikely(!(ret & VM_FAULT_LOCKED)))
Jan Kara667240e2016-12-14 15:07:07 -08003626 lock_page(vmf->page);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003627 else
Jan Kara667240e2016-12-14 15:07:07 -08003628 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003629
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003630 return ret;
3631}
3632
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003633/*
3634 * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
3635 * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
3636 * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
3637 * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
3638 */
3639static int pmd_devmap_trans_unstable(pmd_t *pmd)
3640{
3641 return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
3642}
3643
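/*
 * Ensure vmf->pmd points to a page table (installing the preallocated table
 * or allocating one if needed), then map and lock the pte for vmf->address
 * into vmf->pte / vmf->ptl.  Returns VM_FAULT_NOPAGE if a huge or devmap pmd
 * materialized under us, VM_FAULT_OOM if allocation failed, 0 otherwise.
 */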
Souptick Joarder2b740302018-08-23 17:01:36 -07003644static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003645{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003646 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003647
Jan Kara82b0f8c2016-12-14 15:06:58 -08003648 if (!pmd_none(*vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003649 goto map_pte;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003650 if (vmf->prealloc_pte) {
3651 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3652 if (unlikely(!pmd_none(*vmf->pmd))) {
3653 spin_unlock(vmf->ptl);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003654 goto map_pte;
3655 }
3656
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08003657 mm_inc_nr_ptes(vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003658 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3659 spin_unlock(vmf->ptl);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08003660 vmf->prealloc_pte = NULL;
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003661 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003662 return VM_FAULT_OOM;
3663 }
3664map_pte:
3665 /*
3666 * If a huge pmd materialized under us, just retry later. Use
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003667 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
3668 * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
3669 * under us and then back to pmd_none, as a result of MADV_DONTNEED
3670 * running immediately after a huge pmd fault in a different thread of
3671 * this mm, in turn leading to a misleading pmd_trans_huge() retval.
3672 * All we have to ensure is that it is a regular pmd that we can walk
3673 * with pte_offset_map() and we can do that through an atomic read in
3674 * C, which is what pmd_trans_unstable() provides.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003675 */
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003676 if (pmd_devmap_trans_unstable(vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003677 return VM_FAULT_NOPAGE;
3678
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003679 /*
3680 * At this point we know that our vmf->pmd points to a page of ptes
3681 * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
3682 * for the duration of the fault. If a racing MADV_DONTNEED runs and
3683 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3684 * be valid and we will re-check to make sure the vmf->pte isn't
3685 * pte_none() under vmf->ptl protection when we return to
3686 * alloc_set_pte().
3687 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003688 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3689 &vmf->ptl);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003690 return 0;
3691}
3692
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07003693#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Jan Kara82b0f8c2016-12-14 15:06:58 -08003694static void deposit_prealloc_pte(struct vm_fault *vmf)
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003695{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003696 struct vm_area_struct *vma = vmf->vma;
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003697
Jan Kara82b0f8c2016-12-14 15:06:58 -08003698 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003699 /*
3700 * We are going to consume the prealloc table,
3701 * count that as nr_ptes.
3702 */
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08003703 mm_inc_nr_ptes(vma->vm_mm);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08003704 vmf->prealloc_pte = NULL;
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003705}
3706
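/*
 * Try to map @page with a PMD-sized entry at the faulting address.  Falls
 * back (VM_FAULT_FALLBACK) when the VMA or the page is not suitable for a
 * huge mapping, or when a pmd was already installed under us.
 */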
Souptick Joarder2b740302018-08-23 17:01:36 -07003707static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003708{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003709 struct vm_area_struct *vma = vmf->vma;
3710 bool write = vmf->flags & FAULT_FLAG_WRITE;
3711 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003712 pmd_t entry;
Souptick Joarder2b740302018-08-23 17:01:36 -07003713 int i;
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003714 vm_fault_t ret = VM_FAULT_FALLBACK;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003715
3716 if (!transhuge_vma_suitable(vma, haddr))
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003717 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003718
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003719 page = compound_head(page);
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003720 if (compound_order(page) != HPAGE_PMD_ORDER)
3721 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003722
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003723 /*
3724 * Archs like ppc64 need additonal space to store information
3725 * related to pte entry. Use the preallocated table for that.
3726 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003727 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003728 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003729 if (!vmf->prealloc_pte)
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003730 return VM_FAULT_OOM;
3731 smp_wmb(); /* See comment in __pte_alloc() */
3732 }
3733
Jan Kara82b0f8c2016-12-14 15:06:58 -08003734 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3735 if (unlikely(!pmd_none(*vmf->pmd)))
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003736 goto out;
3737
3738 for (i = 0; i < HPAGE_PMD_NR; i++)
3739 flush_icache_page(vma, page + i);
3740
3741 entry = mk_huge_pmd(page, vma->vm_page_prot);
3742 if (write)
Linus Torvaldsf55e1012017-11-29 09:01:01 -08003743 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003744
Yang Shifadae292018-08-17 15:44:55 -07003745 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003746 page_add_file_rmap(page, true);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003747 /*
3748 * deposit and withdraw with pmd lock held
3749 */
3750 if (arch_needs_pgtable_deposit())
Jan Kara82b0f8c2016-12-14 15:06:58 -08003751 deposit_prealloc_pte(vmf);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003752
Jan Kara82b0f8c2016-12-14 15:06:58 -08003753 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003754
Jan Kara82b0f8c2016-12-14 15:06:58 -08003755 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003756
3757 /* fault is handled */
3758 ret = 0;
Kirill A. Shutemov95ecedc2016-07-26 15:25:31 -07003759 count_vm_event(THP_FILE_MAPPED);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003760out:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003761 spin_unlock(vmf->ptl);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003762 return ret;
3763}
3764#else
Souptick Joarder2b740302018-08-23 17:01:36 -07003765static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003766{
3767 BUILD_BUG();
3768 return 0;
3769}
3770#endif
3771
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003772/**
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003773 * alloc_set_pte - set up a new PTE entry for the given page and add reverse page
Randy Dunlapf1dc1682020-10-13 16:54:01 -07003774 * mapping. If needed, the function allocates a page table or uses the pre-allocated one.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003775 *
Jan Kara82b0f8c2016-12-14 15:06:58 -08003776 * @vmf: fault environment
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003777 * @page: page to map
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003778 *
Jan Kara82b0f8c2016-12-14 15:06:58 -08003779 * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3780 * return.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003781 *
3782 * Target users are page handler itself and implementations of
3783 * vm_ops->map_pages.
Mike Rapoporta862f682019-03-05 15:48:42 -08003784 *
3785 * Return: %0 on success, %VM_FAULT_ code in case of error.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003786 */
Johannes Weiner9d82c692020-06-03 16:02:04 -07003787vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003788{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003789 struct vm_area_struct *vma = vmf->vma;
3790 bool write = vmf->flags & FAULT_FLAG_WRITE;
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003791 pte_t entry;
Souptick Joarder2b740302018-08-23 17:01:36 -07003792 vm_fault_t ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003793
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07003794 if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003795 ret = do_set_pmd(vmf, page);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003796 if (ret != VM_FAULT_FALLBACK)
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003797 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003798 }
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003799
Jan Kara82b0f8c2016-12-14 15:06:58 -08003800 if (!vmf->pte) {
3801 ret = pte_alloc_one_map(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003802 if (ret)
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003803 return ret;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003804 }
3805
3806 /* Re-check under ptl */
Bibo Mao7df67692020-05-27 10:25:18 +08003807 if (unlikely(!pte_none(*vmf->pte))) {
3808 update_mmu_tlb(vma, vmf->address, vmf->pte);
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003809 return VM_FAULT_NOPAGE;
Bibo Mao7df67692020-05-27 10:25:18 +08003810 }
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003811
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003812 flush_icache_page(vma, page);
3813 entry = mk_pte(page, vma->vm_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08003814 entry = pte_sw_mkyoung(entry);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003815 if (write)
3816 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003817 /* copy-on-write page */
3818 if (write && !(vma->vm_flags & VM_SHARED)) {
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003819 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003820 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003821 lru_cache_add_inactive_or_unevictable(page, vma);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003822 } else {
Jerome Marchandeca56ff2016-01-14 15:19:26 -08003823 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07003824 page_add_file_rmap(page, false);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003825 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08003826 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003827
3828 /* no need to invalidate: a not-present page won't be cached */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003829 update_mmu_cache(vma, vmf->address, vmf->pte);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003830
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003831 return 0;
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003832}
3833
Jan Kara9118c0c2016-12-14 15:07:21 -08003834
3835/**
3836 * finish_fault - finish page fault once we have prepared the page to fault
3837 *
3838 * @vmf: structure describing the fault
3839 *
3840 * This function handles all that is needed to finish a page fault once the
3841 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
3842 * given page, adds reverse page mapping, handles memcg charges and LRU
Mike Rapoporta862f682019-03-05 15:48:42 -08003843 * addition.
Jan Kara9118c0c2016-12-14 15:07:21 -08003844 *
3845 * The function expects the page to be locked and on success it consumes a
3846 * reference of a page being mapped (for the PTE which maps it).
Mike Rapoporta862f682019-03-05 15:48:42 -08003847 *
3848 * Return: %0 on success, %VM_FAULT_ code in case of error.
Jan Kara9118c0c2016-12-14 15:07:21 -08003849 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003850vm_fault_t finish_fault(struct vm_fault *vmf)
Jan Kara9118c0c2016-12-14 15:07:21 -08003851{
3852 struct page *page;
Souptick Joarder2b740302018-08-23 17:01:36 -07003853 vm_fault_t ret = 0;
Jan Kara9118c0c2016-12-14 15:07:21 -08003854
3855 /* Did we COW the page? */
3856 if ((vmf->flags & FAULT_FLAG_WRITE) &&
3857 !(vmf->vma->vm_flags & VM_SHARED))
3858 page = vmf->cow_page;
3859 else
3860 page = vmf->page;
Michal Hocko6b31d592017-08-18 15:16:15 -07003861
3862 /*
3863 * check even for read faults because we might have lost our CoWed
3864 * page
3865 */
3866 if (!(vmf->vma->vm_flags & VM_SHARED))
3867 ret = check_stable_address_space(vmf->vma->vm_mm);
3868 if (!ret)
Johannes Weiner9d82c692020-06-03 16:02:04 -07003869 ret = alloc_set_pte(vmf, page);
Jan Kara9118c0c2016-12-14 15:07:21 -08003870 if (vmf->pte)
3871 pte_unmap_unlock(vmf->pte, vmf->ptl);
3872 return ret;
3873}
3874
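/* Fault-around window in bytes; the default is 64KB (16 pages with 4K pages). */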
Kirill A. Shutemov3a910532014-08-06 16:08:07 -07003875static unsigned long fault_around_bytes __read_mostly =
3876 rounddown_pow_of_two(65536);
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003877
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003878#ifdef CONFIG_DEBUG_FS
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003879static int fault_around_bytes_get(void *data, u64 *val)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003880{
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003881 *val = fault_around_bytes;
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003882 return 0;
3883}
3884
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003885/*
William Kucharskida391d62018-01-31 16:21:11 -08003886 * fault_around_bytes must be rounded down to the nearest page order as it's
3887 * what do_fault_around() expects to see.
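 *
 * For example, writing 60000 here stores rounddown_pow_of_two(60000) == 32768,
 * while any value of PAGE_SIZE or less falls back to a single page.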
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003888 */
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003889static int fault_around_bytes_set(void *data, u64 val)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003890{
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003891 if (val / PAGE_SIZE > PTRS_PER_PTE)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003892 return -EINVAL;
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003893 if (val > PAGE_SIZE)
3894 fault_around_bytes = rounddown_pow_of_two(val);
3895 else
3896 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003897 return 0;
3898}
Yevgen Pronenko0a1345f2017-07-10 15:47:17 -07003899DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003900 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003901
3902static int __init fault_around_debugfs(void)
3903{
Greg Kroah-Hartmand9f79792019-03-05 15:46:09 -08003904 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
3905 &fault_around_bytes_fops);
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003906 return 0;
3907}
3908late_initcall(fault_around_debugfs);
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003909#endif
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003910
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07003911/*
3912 * do_fault_around() tries to map a few pages around the fault address. The hope
3913 * is that the pages will be needed soon and this will lower the number of
3914 * faults to handle.
3915 *
3916 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
3917 * not ready to be mapped: not up-to-date, locked, etc.
3918 *
3919 * This function is called with the page table lock taken. In the split
3920 * ptlock case the page table lock protects only those entries which belong
3921 * to the page table corresponding to the fault address.
3922 *
3923 * This function doesn't cross the VMA boundaries, in order to call map_pages()
3924 * only once.
3925 *
William Kucharskida391d62018-01-31 16:21:11 -08003926 * fault_around_bytes defines how many bytes we'll try to map.
3927 * do_fault_around() expects it to be set to a power of two less than or equal
3928 * to PTRS_PER_PTE.
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07003929 *
William Kucharskida391d62018-01-31 16:21:11 -08003930 * The virtual address of the area that we map is naturally aligned to
3931 * fault_around_bytes rounded down to the machine page size
3932 * (and therefore to page order). This way it's easier to guarantee
3933 * that we don't cross page table boundaries.
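 *
 * For example, assuming 4K pages and fault_around_bytes == 65536 (16 pages):
 * a fault at 0x12345678 in a sufficiently large VMA aligns vmf->address down
 * to 0x12340000, and the 16 ptes covering 0x12340000..0x1234ffff (all within
 * one page table) are handed to ->map_pages().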
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07003934 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003935static vm_fault_t do_fault_around(struct vm_fault *vmf)
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003936{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003937 unsigned long address = vmf->address, nr_pages, mask;
Jan Kara0721ec82016-12-14 15:07:04 -08003938 pgoff_t start_pgoff = vmf->pgoff;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003939 pgoff_t end_pgoff;
Souptick Joarder2b740302018-08-23 17:01:36 -07003940 int off;
3941 vm_fault_t ret = 0;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003942
Jason Low4db0c3c2015-04-15 16:14:08 -07003943 nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
Kirill A. Shutemovaecd6f42014-08-06 16:08:05 -07003944 mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
3945
Jan Kara82b0f8c2016-12-14 15:06:58 -08003946 vmf->address = max(address & mask, vmf->vma->vm_start);
3947 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003948 start_pgoff -= off;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003949
3950 /*
William Kucharskida391d62018-01-31 16:21:11 -08003951 * end_pgoff is either the end of the page table, the end of
3952 * the vma or nr_pages from start_pgoff, depending on what is nearest.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003953 */
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003954 end_pgoff = start_pgoff -
Jan Kara82b0f8c2016-12-14 15:06:58 -08003955 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003956 PTRS_PER_PTE - 1;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003957 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003958 start_pgoff + nr_pages - 1);
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003959
Jan Kara82b0f8c2016-12-14 15:06:58 -08003960 if (pmd_none(*vmf->pmd)) {
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003961 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003962 if (!vmf->prealloc_pte)
Vegard Nossumc5f88bd2016-08-02 14:02:22 -07003963 goto out;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003964 smp_wmb(); /* See comment in __pte_alloc() */
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003965 }
3966
Jan Kara82b0f8c2016-12-14 15:06:58 -08003967 vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003968
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003969 /* Huge page is mapped? Page fault is solved */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003970 if (pmd_trans_huge(*vmf->pmd)) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003971 ret = VM_FAULT_NOPAGE;
3972 goto out;
3973 }
3974
3975 /* ->map_pages() hasn't done anything useful. Cold page cache? */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003976 if (!vmf->pte)
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003977 goto out;
3978
3979 /* check if the page fault is solved */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003980 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
3981 if (!pte_none(*vmf->pte))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003982 ret = VM_FAULT_NOPAGE;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003983 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003984out:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003985 vmf->address = address;
3986 vmf->pte = NULL;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003987 return ret;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003988}
3989
Souptick Joarder2b740302018-08-23 17:01:36 -07003990static vm_fault_t do_read_fault(struct vm_fault *vmf)
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07003991{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003992 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07003993 vm_fault_t ret = 0;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003994
3995 /*
3996 * Let's call ->map_pages() first and use ->fault() as fallback
3997 * if page by the offset is not ready to be mapped (cold cache or
3998 * something).
3999 */
Kirill A. Shutemov9b4bdd22015-02-10 14:09:51 -08004000 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
Jan Kara0721ec82016-12-14 15:07:04 -08004001 ret = do_fault_around(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004002 if (ret)
4003 return ret;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004004 }
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004005
Jan Kara936ca802016-12-14 15:07:10 -08004006 ret = __do_fault(vmf);
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004007 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4008 return ret;
4009
Jan Kara9118c0c2016-12-14 15:07:21 -08004010 ret |= finish_fault(vmf);
Jan Kara936ca802016-12-14 15:07:10 -08004011 unlock_page(vmf->page);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004012 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
Jan Kara936ca802016-12-14 15:07:10 -08004013 put_page(vmf->page);
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004014 return ret;
4015}
4016
Souptick Joarder2b740302018-08-23 17:01:36 -07004017static vm_fault_t do_cow_fault(struct vm_fault *vmf)
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004018{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004019 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07004020 vm_fault_t ret;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004021
4022 if (unlikely(anon_vma_prepare(vma)))
4023 return VM_FAULT_OOM;
4024
Jan Kara936ca802016-12-14 15:07:10 -08004025 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4026 if (!vmf->cow_page)
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004027 return VM_FAULT_OOM;
4028
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07004029 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
Jan Kara936ca802016-12-14 15:07:10 -08004030 put_page(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004031 return VM_FAULT_OOM;
4032 }
Johannes Weiner9d82c692020-06-03 16:02:04 -07004033 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004034
Jan Kara936ca802016-12-14 15:07:10 -08004035 ret = __do_fault(vmf);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004036 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4037 goto uncharge_out;
Jan Kara39170482016-12-14 15:07:18 -08004038 if (ret & VM_FAULT_DONE_COW)
4039 return ret;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004040
Jan Karab1aa8122016-12-14 15:07:24 -08004041 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
Jan Kara936ca802016-12-14 15:07:10 -08004042 __SetPageUptodate(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004043
Jan Kara9118c0c2016-12-14 15:07:21 -08004044 ret |= finish_fault(vmf);
Jan Karab1aa8122016-12-14 15:07:24 -08004045 unlock_page(vmf->page);
4046 put_page(vmf->page);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004047 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4048 goto uncharge_out;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004049 return ret;
4050uncharge_out:
Jan Kara936ca802016-12-14 15:07:10 -08004051 put_page(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004052 return ret;
4053}
4054
Souptick Joarder2b740302018-08-23 17:01:36 -07004055static vm_fault_t do_shared_fault(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004057 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07004058 vm_fault_t ret, tmp;
KAMEZAWA Hiroyuki1d65f862011-07-25 17:12:27 -07004059
Jan Kara936ca802016-12-14 15:07:10 -08004060 ret = __do_fault(vmf);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07004061 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004062 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063
4064 /*
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004065 * Check if the backing address space wants to know that the page is
4066 * about to become writable
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 */
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004068 if (vma->vm_ops->page_mkwrite) {
Jan Kara936ca802016-12-14 15:07:10 -08004069 unlock_page(vmf->page);
Jan Kara38b8cb72016-12-14 15:07:30 -08004070 tmp = do_page_mkwrite(vmf);
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004071 if (unlikely(!tmp ||
4072 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
Jan Kara936ca802016-12-14 15:07:10 -08004073 put_page(vmf->page);
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004074 return tmp;
4075 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 }
4077
Jan Kara9118c0c2016-12-14 15:07:21 -08004078 ret |= finish_fault(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004079 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4080 VM_FAULT_RETRY))) {
Jan Kara936ca802016-12-14 15:07:10 -08004081 unlock_page(vmf->page);
4082 put_page(vmf->page);
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004083 return ret;
Peter Zijlstrad08b3852006-09-25 23:30:57 -07004084 }
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004085
Johannes Weiner89b15332019-11-30 17:50:22 -08004086 ret |= fault_dirty_shared_page(vmf);
KAMEZAWA Hiroyuki1d65f862011-07-25 17:12:27 -07004087 return ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004088}
Nick Piggind00806b2007-07-19 01:46:57 -07004089
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004090/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004091 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004092 * but allow concurrent faults).
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004093 * The mmap_lock may have been released depending on flags and our
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004094 * return value. See filemap_fault() and __lock_page_or_retry().
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004095 * If mmap_lock is released, vma may become invalid (for example
Jan Stancekfc8efd22019-03-05 15:50:08 -08004096 * by other thread calling munmap()).
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004097 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004098static vm_fault_t do_fault(struct vm_fault *vmf)
Nick Piggin54cb8822007-07-19 01:46:59 -07004099{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004100 struct vm_area_struct *vma = vmf->vma;
Jan Stancekfc8efd22019-03-05 15:50:08 -08004101 struct mm_struct *vm_mm = vma->vm_mm;
Souptick Joarder2b740302018-08-23 17:01:36 -07004102 vm_fault_t ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004103
Aneesh Kumar K.Vff09d7e2018-10-26 15:09:01 -07004104 /*
4105 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4106 */
4107 if (!vma->vm_ops->fault) {
4108 /*
4109 * If we find a migration pmd entry or a none pmd entry, which
4110 * should never happen, return SIGBUS
4111 */
4112 if (unlikely(!pmd_present(*vmf->pmd)))
4113 ret = VM_FAULT_SIGBUS;
4114 else {
4115 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4116 vmf->pmd,
4117 vmf->address,
4118 &vmf->ptl);
4119 /*
4120 * Make sure this is not a temporary clearing of pte
4121 * by holding ptl and checking again. An R/M/W update
4122 * of pte involves: taking ptl, clearing the pte so that
4123 * we don't have concurrent modification by hardware,
4124 * followed by an update.
4125 */
4126 if (unlikely(pte_none(*vmf->pte)))
4127 ret = VM_FAULT_SIGBUS;
4128 else
4129 ret = VM_FAULT_NOPAGE;
4130
4131 pte_unmap_unlock(vmf->pte, vmf->ptl);
4132 }
4133 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08004134 ret = do_read_fault(vmf);
4135 else if (!(vma->vm_flags & VM_SHARED))
4136 ret = do_cow_fault(vmf);
4137 else
4138 ret = do_shared_fault(vmf);
4139
4140 /* preallocated pagetable is unused: free it */
4141 if (vmf->prealloc_pte) {
Jan Stancekfc8efd22019-03-05 15:50:08 -08004142 pte_free(vm_mm, vmf->prealloc_pte);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08004143 vmf->prealloc_pte = NULL;
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08004144 }
4145 return ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004146}
4147
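/*
 * Count the NUMA hinting fault and ask mpol_misplaced() which node (if any)
 * the page should be migrated to.  Takes a reference on the page.
 */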
Rashika Kheriab19a9932014-04-03 14:48:02 -07004148static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
Rik van Riel04bb2f92013-10-07 11:29:36 +01004149 unsigned long addr, int page_nid,
4150 int *flags)
Mel Gorman9532fec2012-11-15 01:24:32 +00004151{
4152 get_page(page);
4153
4154 count_vm_numa_event(NUMA_HINT_FAULTS);
Rik van Riel04bb2f92013-10-07 11:29:36 +01004155 if (page_nid == numa_node_id()) {
Mel Gorman9532fec2012-11-15 01:24:32 +00004156 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
Rik van Riel04bb2f92013-10-07 11:29:36 +01004157 *flags |= TNF_FAULT_LOCAL;
4158 }
Mel Gorman9532fec2012-11-15 01:24:32 +00004159
4160 return mpol_misplaced(page, vma, addr);
4161}
4162
Souptick Joarder2b740302018-08-23 17:01:36 -07004163static vm_fault_t do_numa_page(struct vm_fault *vmf)
Mel Gormand10e63f2012-10-25 14:16:31 +02004164{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004165 struct vm_area_struct *vma = vmf->vma;
Mel Gorman4daae3b2012-11-02 11:33:45 +00004166 struct page *page = NULL;
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004167 int page_nid = NUMA_NO_NODE;
Peter Zijlstra90572892013-10-07 11:29:20 +01004168 int last_cpupid;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02004169 int target_nid;
Mel Gormanb8593bf2012-11-21 01:18:23 +00004170 bool migrated = false;
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004171 pte_t pte, old_pte;
Aneesh Kumar K.V288bc542017-02-24 14:59:16 -08004172 bool was_writable = pte_savedwrite(vmf->orig_pte);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004173 int flags = 0;
Mel Gormand10e63f2012-10-25 14:16:31 +02004174
4175 /*
Tobin C Harding166f61b2017-02-24 14:59:01 -08004176 * The "pte" at this point cannot be used safely without
4177 * validation through pte_unmap_same(). It's of NUMA type but
4178 * the pfn may be screwed if the read is non-atomic.
Tobin C Harding166f61b2017-02-24 14:59:01 -08004179 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004180 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4181 spin_lock(vmf->ptl);
Aneesh Kumar K.Vcee216a2017-02-24 14:59:13 -08004182 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004183 pte_unmap_unlock(vmf->pte, vmf->ptl);
Mel Gorman4daae3b2012-11-02 11:33:45 +00004184 goto out;
4185 }
4186
Aneesh Kumar K.Vcee216a2017-02-24 14:59:13 -08004187 /*
4188 * Make it present again. Depending on how the arch implements
4189 * non-accessible ptes, some can allow access by kernel mode.
4190 */
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004191 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4192 pte = pte_modify(old_pte, vma->vm_page_prot);
Mel Gorman4d942462015-02-12 14:58:28 -08004193 pte = pte_mkyoung(pte);
Mel Gormanb191f9b2015-03-25 15:55:40 -07004194 if (was_writable)
4195 pte = pte_mkwrite(pte);
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004196 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004197 update_mmu_cache(vma, vmf->address, vmf->pte);
Mel Gormand10e63f2012-10-25 14:16:31 +02004198
Jan Kara82b0f8c2016-12-14 15:06:58 -08004199 page = vm_normal_page(vma, vmf->address, pte);
Mel Gormand10e63f2012-10-25 14:16:31 +02004200 if (!page) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004201 pte_unmap_unlock(vmf->pte, vmf->ptl);
Mel Gormand10e63f2012-10-25 14:16:31 +02004202 return 0;
4203 }
4204
Kirill A. Shutemove81c4802016-01-15 16:53:49 -08004205 /* TODO: handle PTE-mapped THP */
4206 if (PageCompound(page)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004207 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemove81c4802016-01-15 16:53:49 -08004208 return 0;
4209 }
4210
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004211 /*
Mel Gormanbea66fb2015-03-25 15:55:37 -07004212 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4213 * much anyway since they can be in shared cache state. This misses
4214 * the case where a mapping is writable but the process never writes
4215 * to it but pte_write gets cleared during protection updates and
4216 * pte_dirty has unpredictable behaviour between PTE scan updates,
4217 * background writeback, dirty balancing and application behaviour.
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004218 */
Rik van Rield59dc7b2016-09-08 21:30:53 -04004219 if (!pte_write(pte))
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004220 flags |= TNF_NO_GROUP;
4221
Rik van Rieldabe1d92013-10-07 11:29:34 +01004222 /*
4223 * Flag if the page is shared between multiple address spaces. This
4224 * is later used when determining whether to group tasks together
4225 */
4226 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4227 flags |= TNF_SHARED;
4228
Peter Zijlstra90572892013-10-07 11:29:20 +01004229 last_cpupid = page_cpupid_last(page);
Mel Gorman8191acb2013-10-07 11:28:45 +01004230 page_nid = page_to_nid(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004231 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004232 &flags);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004233 pte_unmap_unlock(vmf->pte, vmf->ptl);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004234 if (target_nid == NUMA_NO_NODE) {
Mel Gorman4daae3b2012-11-02 11:33:45 +00004235 put_page(page);
4236 goto out;
4237 }
4238
4239 /* Migrate to the requested node */
Mel Gorman1bc115d2013-10-07 11:29:05 +01004240 migrated = migrate_misplaced_page(page, vma, target_nid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004241 if (migrated) {
Mel Gorman8191acb2013-10-07 11:28:45 +01004242 page_nid = target_nid;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004243 flags |= TNF_MIGRATED;
Mel Gorman074c2382015-03-25 15:55:42 -07004244 } else
4245 flags |= TNF_MIGRATE_FAIL;
Mel Gorman4daae3b2012-11-02 11:33:45 +00004246
4247out:
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004248 if (page_nid != NUMA_NO_NODE)
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004249 task_numa_fault(last_cpupid, page_nid, 1, flags);
Mel Gormand10e63f2012-10-25 14:16:31 +02004250 return 0;
4251}
4252
Souptick Joarder2b740302018-08-23 17:01:36 -07004253static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004254{
Dave Jiangf4200392017-02-22 15:40:06 -08004255 if (vma_is_anonymous(vmf->vma))
Jan Kara82b0f8c2016-12-14 15:06:58 -08004256 return do_huge_pmd_anonymous_page(vmf);
Dave Jianga2d58162017-02-24 14:56:59 -08004257 if (vmf->vma->vm_ops->huge_fault)
Dave Jiangc791ace2017-02-24 14:57:08 -08004258 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004259 return VM_FAULT_FALLBACK;
4260}
4261
Geert Uytterhoeven183f24aa2017-12-14 15:32:52 -08004262/* `inline' is required to avoid gcc 4.1.2 build error */
Souptick Joarder2b740302018-08-23 17:01:36 -07004263static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004264{
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004265 if (vma_is_anonymous(vmf->vma)) {
Peter Xu292924b2020-04-06 20:05:49 -07004266 if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004267 return handle_userfault(vmf, VM_UFFD_WP);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004268 return do_huge_pmd_wp_page(vmf, orig_pmd);
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004269 }
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004270 if (vmf->vma->vm_ops->huge_fault) {
4271 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
Kirill A. Shutemovaf9e4d52016-07-26 15:25:40 -07004272
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004273 if (!(ret & VM_FAULT_FALLBACK))
4274 return ret;
4275 }
4276
4277 /* COW or write-notify handled on pte level: split pmd. */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004278 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
Kirill A. Shutemovaf9e4d52016-07-26 15:25:40 -07004279
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004280 return VM_FAULT_FALLBACK;
4281}
4282
Souptick Joarder2b740302018-08-23 17:01:36 -07004283static vm_fault_t create_huge_pud(struct vm_fault *vmf)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004284{
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004285#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4286 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004287 /* No support for anonymous transparent PUD pages yet */
4288 if (vma_is_anonymous(vmf->vma))
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004289 goto split;
4290 if (vmf->vma->vm_ops->huge_fault) {
4291 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4292
4293 if (!(ret & VM_FAULT_FALLBACK))
4294 return ret;
4295 }
4296split:
4297 /* COW or write-notify not handled on PUD level: split pud.*/
4298 __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004299#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4300 return VM_FAULT_FALLBACK;
4301}
4302
Souptick Joarder2b740302018-08-23 17:01:36 -07004303static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004304{
4305#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4306 /* No support for anonymous transparent PUD pages yet */
4307 if (vma_is_anonymous(vmf->vma))
4308 return VM_FAULT_FALLBACK;
4309 if (vmf->vma->vm_ops->huge_fault)
Dave Jiangc791ace2017-02-24 14:57:08 -08004310 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004311#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4312 return VM_FAULT_FALLBACK;
4313}
4314
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315/*
4316 * These routines also need to handle stuff like marking pages dirty
4317 * and/or accessed for architectures that don't do it in hardware (most
4318 * RISC architectures). The early dirtying is also good on the i386.
4319 *
4320 * There is also a hook called "update_mmu_cache()" that architectures
4321 * with external mmu caches can use to update those (ie the Sparc or
4322 * PowerPC hashed page tables that act as extended TLBs).
4323 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004324 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004325 * concurrent faults).
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004326 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004327 * The mmap_lock may have been released depending on flags and our return value.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004328 * See filemap_fault() and __lock_page_or_retry().
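 *
 * In short: a none pte is handled by do_anonymous_page() or do_fault(), a
 * non-present (swap or migration) pte by do_swap_page(), a NUMA protnone pte
 * by do_numa_page(), and a write to a read-only pte by do_wp_page().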
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004330static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331{
4332 pte_t entry;
4333
Jan Kara82b0f8c2016-12-14 15:06:58 -08004334 if (unlikely(pmd_none(*vmf->pmd))) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004335 /*
4336 * Leave __pte_alloc() until later: because vm_ops->fault may
4337 * want to allocate a huge page, and if we expose the page table
4338 * for an instant, it will be difficult to retract from
4339 * concurrent faults and from rmap lookups.
4340 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004341 vmf->pte = NULL;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004342 } else {
4343 /* See comment in pte_alloc_one_map() */
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07004344 if (pmd_devmap_trans_unstable(vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004345 return 0;
4346 /*
4347 * A regular pmd is established and it can't morph into a huge
4348 * pmd from under us anymore at this point because we hold the
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004349 * mmap_lock in read mode and khugepaged takes it in write mode.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004350 * So now it's safe to run pte_offset_map().
4351 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004352 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
Jan Kara29943022016-12-14 15:07:16 -08004353 vmf->orig_pte = *vmf->pte;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004354
4355 /*
4356 * Some architectures can have larger ptes than wordsize,
4357 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
Paul E. McKenneyb03a0fe2017-10-23 14:07:25 -07004358 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4359 * accesses. The code below just needs a consistent view
4360 * for the ifs and we later double check anyway with the
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004361 * ptl lock held. So here a barrier will do.
4362 */
4363 barrier();
Jan Kara29943022016-12-14 15:07:16 -08004364 if (pte_none(vmf->orig_pte)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004365 pte_unmap(vmf->pte);
4366 vmf->pte = NULL;
Hugh Dickins65500d22005-10-29 18:15:59 -07004367 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368 }
4369
Jan Kara82b0f8c2016-12-14 15:06:58 -08004370 if (!vmf->pte) {
4371 if (vma_is_anonymous(vmf->vma))
4372 return do_anonymous_page(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004373 else
Jan Kara82b0f8c2016-12-14 15:06:58 -08004374 return do_fault(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004375 }
4376
Jan Kara29943022016-12-14 15:07:16 -08004377 if (!pte_present(vmf->orig_pte))
4378 return do_swap_page(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004379
Jan Kara29943022016-12-14 15:07:16 -08004380 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4381 return do_numa_page(vmf);
Mel Gormand10e63f2012-10-25 14:16:31 +02004382
Jan Kara82b0f8c2016-12-14 15:06:58 -08004383 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4384 spin_lock(vmf->ptl);
Jan Kara29943022016-12-14 15:07:16 -08004385 entry = vmf->orig_pte;
Bibo Mao7df67692020-05-27 10:25:18 +08004386 if (unlikely(!pte_same(*vmf->pte, entry))) {
4387 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07004388 goto unlock;
Bibo Mao7df67692020-05-27 10:25:18 +08004389 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08004390 if (vmf->flags & FAULT_FLAG_WRITE) {
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004391 if (!pte_write(entry))
Jan Kara29943022016-12-14 15:07:16 -08004392 return do_wp_page(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393 entry = pte_mkdirty(entry);
4394 }
4395 entry = pte_mkyoung(entry);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004396 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4397 vmf->flags & FAULT_FLAG_WRITE)) {
4398 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004399 } else {
Yang Shib7333b52020-08-14 21:30:41 -07004400 /* Skip spurious TLB flush for retried page fault */
4401 if (vmf->flags & FAULT_FLAG_TRIED)
4402 goto unlock;
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004403 /*
4404 * This is needed only for protection faults but the arch code
4405 * is not yet telling us if this is a protection fault or not.
4406 * This still avoids useless tlb flushes for .text page faults
4407 * with threads.
4408 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004409 if (vmf->flags & FAULT_FLAG_WRITE)
4410 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004411 }
Hugh Dickins8f4e2102005-10-29 18:16:26 -07004412unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08004413 pte_unmap_unlock(vmf->pte, vmf->ptl);
Nick Piggin83c54072007-07-19 01:47:05 -07004414 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415}
4416
4417/*
4418 * By the time we get here, we already hold the mm semaphore
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004419 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004420 * The mmap_lock may have been released depending on flags and our
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004421 * return value. See filemap_fault() and __lock_page_or_retry().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004423static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4424 unsigned long address, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004426 struct vm_fault vmf = {
		.vma = vma,
		.address = address & PAGE_MASK,
		.flags = flags,
		.pgoff = linear_page_index(vma, address),
		.gfp_mask = __get_fault_gfp_mask(vma),
	};
	unsigned int dirty = flags & FAULT_FLAG_WRITE;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	p4d_t *p4d;
	vm_fault_t ret;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return VM_FAULT_OOM;

	vmf.pud = pud_alloc(mm, p4d, address);
	if (!vmf.pud)
		return VM_FAULT_OOM;
retry_pud:
	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
		ret = create_huge_pud(&vmf);
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	} else {
		pud_t orig_pud = *vmf.pud;

		barrier();
		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {

			/* NUMA case for anonymous PUDs would go here */

			if (dirty && !pud_write(orig_pud)) {
				ret = wp_huge_pud(&vmf, orig_pud);
				if (!(ret & VM_FAULT_FALLBACK))
					return ret;
			} else {
				huge_pud_set_accessed(&vmf, orig_pud);
				return 0;
			}
		}
	}

	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
	if (!vmf.pmd)
		return VM_FAULT_OOM;

	/* Huge pud page fault raced with pmd_alloc? */
	if (pud_trans_unstable(vmf.pud))
		goto retry_pud;

	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
		ret = create_huge_pmd(&vmf);
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	} else {
		pmd_t orig_pmd = *vmf.pmd;

		barrier();
		if (unlikely(is_swap_pmd(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
			if (is_pmd_migration_entry(orig_pmd))
				pmd_migration_entry_wait(mm, vmf.pmd);
			return 0;
		}
		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
				return do_huge_pmd_numa_page(&vmf, orig_pmd);

			if (dirty && !pmd_write(orig_pmd)) {
				ret = wp_huge_pmd(&vmf, orig_pmd);
				if (!(ret & VM_FAULT_FALLBACK))
					return ret;
			} else {
				huge_pmd_set_accessed(&vmf, orig_pmd);
				return 0;
			}
		}
	}

	return handle_pte_fault(&vmf);
}

/**
 * mm_account_fault - Do page fault accounting
 *
 * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
 *        of perf event counters, but we'll still do the per-task accounting
 *        for the task that triggered this page fault.
 * @address: the faulted address.
 * @flags: the fault flags.
 * @ret: the fault retcode.
 *
 * This will take care of most of the page fault accounting.  Meanwhile, it
 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
 * updates.  Note, however, that the handling of PERF_COUNT_SW_PAGE_FAULTS
 * should still be in per-arch page fault handlers at the entry of page fault.
 */
static inline void mm_account_fault(struct pt_regs *regs,
				    unsigned long address, unsigned int flags,
				    vm_fault_t ret)
{
	bool major;

	/*
	 * We don't do accounting for some specific faults:
	 *
	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
	 *   includes arch_vma_access_permitted() failing before reaching here.
	 *   So this is not a "this many hardware page faults" counter.  We
	 *   should use the hw profiling for that.
	 *
	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
	 *   once they're completed.
	 */
	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
		return;

	/*
	 * We define the fault as a major fault when the final successful
	 * fault is VM_FAULT_MAJOR, or if it retried (which implies that we
	 * could not handle it immediately the first time).
	 */
	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

	if (major)
		current->maj_flt++;
	else
		current->min_flt++;

	/*
	 * If the fault is done for GUP, regs will be NULL.  In that case we
	 * only update the per-task fault counters of the task that triggered
	 * the fault, and we skip the perf event updates.
	 */
	if (!regs)
		return;

	if (major)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}

/*
 * By the time we get here, we already hold the mmap_lock.
 *
 * The mmap_lock may have been released depending on flags and our
 * return value.  See filemap_fault() and __lock_page_or_retry().
 */
vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
			   unsigned int flags, struct pt_regs *regs)
{
	vm_fault_t ret;

	__set_current_state(TASK_RUNNING);

	count_vm_event(PGFAULT);
	count_memcg_event_mm(vma->vm_mm, PGFAULT);

	/* do counter updates before entering the really critical section. */
	check_sync_rss_stat(current);

	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
				       flags & FAULT_FLAG_INSTRUCTION,
				       flags & FAULT_FLAG_REMOTE))
		return VM_FAULT_SIGSEGV;

	/*
	 * Enable the memcg OOM handling for faults triggered in user
	 * space.  Kernel faults are handled more gracefully.
	 */
	if (flags & FAULT_FLAG_USER)
		mem_cgroup_enter_user_fault();

	if (unlikely(is_vm_hugetlb_page(vma)))
		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
	else
		ret = __handle_mm_fault(vma, address, flags);

	if (flags & FAULT_FLAG_USER) {
		mem_cgroup_exit_user_fault();
		/*
		 * The task may have entered a memcg OOM situation but
		 * if the allocation error was handled gracefully (no
		 * VM_FAULT_OOM), there is no need to kill anything.
		 * Just clean up the OOM state peacefully.
		 */
		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
			mem_cgroup_oom_synchronize(false);
	}

	mm_account_fault(regs, address, flags, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(handle_mm_fault);
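
/*
 * Illustrative sketch (not part of this file, names hypothetical): the core
 * loop an architecture page-fault handler runs around handle_mm_fault().
 * Real handlers (see e.g. arch/x86/mm/fault.c) additionally cope with stack
 * expansion, fatal signals and kernel-mode faults; this only shows how the
 * mmap_lock, the retry protocol and the @regs-based accounting fit together.
 */
static vm_fault_t example_do_user_fault(struct mm_struct *mm, unsigned long address,
					unsigned int flags, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		mmap_read_unlock(mm);
		return VM_FAULT_SIGSEGV;
	}

	/* Passing @regs lets mm_account_fault() emit the perf events too. */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault & VM_FAULT_RETRY) {
		/* The fault path already dropped the mmap_lock for us. */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(mm);
	return fault;
}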

#ifndef __PAGETABLE_P4D_FOLDED
/*
 * Allocate p4d page table.
 * We've already handled the fast-path in-line.
 */
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	p4d_t *new = p4d_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		p4d_free(mm, new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_P4D_FOLDED */

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
	if (!p4d_present(*p4d)) {
		mm_inc_nr_puds(mm);
		p4d_populate(mm, p4d, new);
	} else	/* Another has populated it */
		pud_free(mm, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	spinlock_t *ptl;
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	ptl = pud_lock(mm, pud);
	if (!pud_present(*pud)) {
		mm_inc_nr_pmds(mm);
		pud_populate(mm, pud, new);
	} else	/* Another has populated it */
		pmd_free(mm, new);
	spin_unlock(ptl);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
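
/*
 * For reference, the "fast-path in-line" that the comments above mention
 * lives in include/linux/mm.h; a simplified sketch of the pmd level is shown
 * below (guarded out so it is not a second definition).  Only when the
 * upper-level entry is still empty do we drop into the __pmd_alloc() slow
 * path defined above.
 */
#if 0
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
		NULL : pmd_offset(pud, address);
}
#endif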

static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
			    struct mmu_notifier_range *range,
			    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
		goto out;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;

	pmd = pmd_offset(pud, address);
	VM_BUG_ON(pmd_trans_huge(*pmd));

	if (pmd_huge(*pmd)) {
		if (!pmdpp)
			goto out;

		if (range) {
			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
						NULL, mm, address & PMD_MASK,
						(address & PMD_MASK) + PMD_SIZE);
			mmu_notifier_invalidate_range_start(range);
		}
		*ptlp = pmd_lock(mm, pmd);
		if (pmd_huge(*pmd)) {
			*pmdpp = pmd;
			return 0;
		}
		spin_unlock(*ptlp);
		if (range)
			mmu_notifier_invalidate_range_end(range);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;

	if (range) {
		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
					address & PAGE_MASK,
					(address & PAGE_MASK) + PAGE_SIZE);
		mmu_notifier_invalidate_range_start(range);
	}
	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
	if (!pte_present(*ptep))
		goto unlock;
	*ptepp = ptep;
	return 0;
unlock:
	pte_unmap_unlock(ptep, *ptlp);
	if (range)
		mmu_notifier_invalidate_range_end(range);
out:
	return -EINVAL;
}

static inline int follow_pte(struct mm_struct *mm, unsigned long address,
			     pte_t **ptepp, spinlock_t **ptlp)
{
	int res;

	/* (void) is needed to make gcc happy */
	(void) __cond_lock(*ptlp,
			   !(res = __follow_pte_pmd(mm, address, NULL,
						    ptepp, NULL, ptlp)));
	return res;
}

int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
		   struct mmu_notifier_range *range,
		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
{
	int res;

	/* (void) is needed to make gcc happy */
	(void) __cond_lock(*ptlp,
			   !(res = __follow_pte_pmd(mm, address, range,
						    ptepp, pmdpp, ptlp)));
	return res;
}
EXPORT_SYMBOL(follow_pte_pmd);
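
/*
 * Illustrative sketch (hypothetical helper, not part of this file) of the
 * calling convention: on success the entry is mapped and locked, and the
 * mmu notifier range passed in has been started, so the caller must drop
 * the lock and end the range itself.  fs/dax.c follows this pattern when
 * write-protecting mapped pages.
 */
static void example_walk_one_address(struct vm_area_struct *vma, unsigned long address)
{
	struct mmu_notifier_range range;
	pte_t *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	if (follow_pte_pmd(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
		return;		/* nothing mapped at @address */

	if (pmdp) {
		/* Huge entry: *pmdp is stable while @ptl is held. */
		spin_unlock(ptl);
	} else {
		/* Regular PTE: *ptep is stable while @ptl is held. */
		pte_unmap_unlock(ptep, ptl);
	}
	mmu_notifier_invalidate_range_end(&range);
}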

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Return: zero on success, with the PFN stored at @pfn; a negative error
 * code otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	int ret = -EINVAL;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return ret;

	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
	if (ret)
		return ret;
	*pfn = pte_pfn(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
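
/*
 * Illustrative sketch (hypothetical function, not part of this file): a
 * driver resolving the PFN behind a VM_IO/VM_PFNMAP mapping.  The mmap_lock
 * must be held across the lookup, and the result is only a snapshot once the
 * PTE lock has been dropped again inside follow_pfn().
 */
static int example_vaddr_to_pfn(struct mm_struct *mm, unsigned long vaddr,
				unsigned long *pfn)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	mmap_read_lock(mm);
	vma = find_vma(mm, vaddr);
	if (vma && vma->vm_start <= vaddr)
		ret = follow_pfn(vma, vaddr, pfn);
	mmap_read_unlock(mm);
	return ret;
}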

#ifdef CONFIG_HAVE_IOREMAP_PROT
int follow_phys(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags,
		unsigned long *prot, resource_size_t *phys)
{
	int ret = -EINVAL;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;

	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
		goto out;
	pte = *ptep;

	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	*prot = pgprot_val(pte_pgprot(pte));
	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;

	ret = 0;
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return ret;
}

int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write)
{
	resource_size_t phys_addr;
	unsigned long prot = 0;
	void __iomem *maddr;
	int offset = addr & (PAGE_SIZE-1);

	if (follow_phys(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
	if (!maddr)
		return -ENOMEM;

	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
		memcpy_fromio(buf, maddr + offset, len);
	iounmap(maddr);

	return len;
}
EXPORT_SYMBOL_GPL(generic_access_phys);
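
/*
 * Illustrative sketch: drivers typically consume this helper by wiring it up
 * as the ->access() method of the vm_operations_struct they install for a
 * VM_IO/VM_PFNMAP mapping, so that ptrace() and access_process_vm() can still
 * reach the mapping; drivers/char/mem.c does this for /dev/mem.  The name
 * below is hypothetical.
 */
static const struct vm_operations_struct example_io_vm_ops = {
	.access = generic_access_phys,
};
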
#endif

/*
 * Access another process' address space as given in mm.  If non-NULL, use the
 * given task for page fault accounting.
 */
int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	void *old_buf = buf;
	int write = gup_flags & FOLL_WRITE;

	if (mmap_read_lock_killable(mm))
		return 0;

	/* ignore errors; just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_remote(mm, addr, 1,
				gup_flags, &page, &vma, NULL);
		if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
			break;
#else
			/*
			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
			 * we can access using slightly different code.
			 */
			vma = find_vma(mm, addr);
			if (!vma || vma->vm_start > addr)
				break;
			if (vma->vm_ops && vma->vm_ops->access)
				ret = vma->vm_ops->access(vma, addr, buf,
							  len, write);
			if (ret <= 0)
				break;
			bytes = ret;
#endif
		} else {
			bytes = len;
			offset = addr & (PAGE_SIZE-1);
			if (bytes > PAGE_SIZE-offset)
				bytes = PAGE_SIZE-offset;

			maddr = kmap(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			kunmap(page);
			put_page(page);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	mmap_read_unlock(mm);

	return buf - old_buf;
}

/**
 * access_remote_vm - access another process' address space
 * @mm: the mm_struct of the target address space
 * @addr: start address to access
 * @buf: source or destination buffer
 * @len: number of bytes to transfer
 * @gup_flags: flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 *
 * Return: number of bytes copied from source to destination.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * Source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);

	mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);
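
/*
 * Illustrative sketch (hypothetical helper, not part of this file): reading
 * one word from another task's address space, in the style of ptrace's peek
 * operations.  A short read means the address was not accessible.
 */
static int example_peek_word(struct task_struct *child, unsigned long addr,
			     unsigned long *val)
{
	int copied;

	copied = access_process_vm(child, addr, val, sizeof(*val), FOLL_FORCE);
	return copied == sizeof(*val) ? 0 : -EIO;
}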

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * We might be running from an atomic context, so we cannot sleep.
	 */
	if (!mmap_read_trylock(mm))
		return;

	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_NOWAIT);
		if (buf) {
			char *p;

			p = file_path(f, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	mmap_read_unlock(mm);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	/*
	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
	 * holding the mmap_lock; this is safe because kernel memory doesn't
	 * get paged out, therefore we'll never actually fault, and the
	 * below annotations will generate false positives.
	 */
	if (uaccess_kernel())
		return;
	if (pagefault_disabled())
		return;
	__might_sleep(file, line, 0);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif
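
/*
 * Illustrative sketch (hypothetical helper, not part of this file): how
 * callers arm the annotation above.  might_fault() expands to
 * __might_fault(__FILE__, __LINE__) when CONFIG_PROVE_LOCKING or
 * CONFIG_DEBUG_ATOMIC_SLEEP is enabled, so sleeping or mmap_lock misuse is
 * flagged even if the copy never actually faults.
 */
static inline unsigned long example_copy_in(void *to, const void __user *from,
					    unsigned long n)
{
	might_fault();	/* we may sleep and take the mmap_lock on a fault */
	return raw_copy_from_user(to, from, n);
}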

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation.  The target subpage will be processed last to keep its
 * cache lines hot.
 */
static inline void process_huge_page(
	unsigned long addr_hint, unsigned int pages_per_huge_page,
	void (*process_subpage)(unsigned long addr, int idx, void *arg),
	void *arg)
{
	int i, n, base, l;
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

	/* Process target subpage last to keep its cache lines hot */
	might_sleep();
	n = (addr_hint - addr) / PAGE_SIZE;
	if (2 * n <= pages_per_huge_page) {
		/* If target subpage in first half of huge page */
		base = 0;
		l = n;
		/* Process subpages at the end of huge page */
		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
			cond_resched();
			process_subpage(addr + i * PAGE_SIZE, i, arg);
		}
	} else {
		/* If target subpage in second half of huge page */
		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
		l = pages_per_huge_page - n;
		/* Process subpages at the beginning of huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
			process_subpage(addr + i * PAGE_SIZE, i, arg);
		}
	}
	/*
	 * Process remaining subpages in left-right-left-right pattern
	 * towards the target subpage
	 */
	for (i = 0; i < l; i++) {
		int left_idx = base + i;
		int right_idx = base + 2 * l - 1 - i;

		cond_resched();
		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
		cond_resched();
		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
	}
}
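
/*
 * Worked example (sketch, not part of this file): with pages_per_huge_page
 * == 8 and the faulting subpage at index 5, the loops above visit the
 * subpages in the order 0, 1, 2, 7, 3, 6, 4, 5 - the subpages farthest from
 * the target first, then left-right-left-right towards it, so the target
 * subpage is touched last and its cache lines stay hot.  The hypothetical
 * callback below just logs that order.
 */
static void example_show_subpage_order(unsigned long addr, int idx, void *arg)
{
	pr_info("visiting subpage %d at %#lx\n", idx, addr);
}
/* process_huge_page(addr_hint, 8, example_show_subpage_order, NULL); */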

static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

static void clear_subpage(unsigned long addr, int idx, void *arg)
{
	struct page *page = arg;

	clear_user_highpage(page + idx, addr);
}

void clear_huge_page(struct page *page,
		     unsigned long addr_hint, unsigned int pages_per_huge_page)
{
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
}

static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

struct copy_subpage_arg {
	struct page *dst;
	struct page *src;
	struct vm_area_struct *vma;
};

static void copy_subpage(unsigned long addr, int idx, void *arg)
{
	struct copy_subpage_arg *copy_arg = arg;

	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
			   addr, copy_arg->vma);
}

void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr_hint, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
}

long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault)
{
	void *src = (void *)usr_src;
	void *page_kaddr;
	unsigned long i, rc = 0;
	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;

	for (i = 0; i < pages_per_huge_page; i++) {
		if (allow_pagefault)
			page_kaddr = kmap(dst_page + i);
		else
			page_kaddr = kmap_atomic(dst_page + i);
		rc = copy_from_user(page_kaddr,
				(const void __user *)(src + i * PAGE_SIZE),
				PAGE_SIZE);
		if (allow_pagefault)
			kunmap(dst_page + i);
		else
			kunmap_atomic(page_kaddr);

		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		cond_resched();
	}
	return ret_val;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS

static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	page->ptl = ptl;
	return true;
}

void ptlock_free(struct page *page)
{
	kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif