// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 *	Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 *	Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *	(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <trace/hooks/mm.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, then end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	/*
	 * Those arches which don't have hw access flag feature need to
	 * implement their own helper. By default, "true" means pagefault
	 * will be hit on old pte.
	 */
	return true;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);

/*
 * Only trace rss_stat when there is a 512kb cross over.
 * Smaller changes may be lost unless every small change is
 * crossing into or returning to a 512kb boundary.
 */
#define TRACE_MM_COUNTER_THRESHOLD 128

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count,
		       long value)
{
	long thresh_mask = ~(TRACE_MM_COUNTER_THRESHOLD - 1);

	/* Threshold roll-over, trace it */
	if ((count & thresh_mask) != ((count - value) & thresh_mask))
		trace_rss_stat(mm, member, count);
}
EXPORT_SYMBOL_GPL(mm_trace_rss_stat);

#if defined(SPLIT_RSS_COUNTING)

void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}

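/*
 * With SPLIT_RSS_COUNTING, rss deltas are cached in current->rss_stat
 * while a task operates on its own mm, avoiding atomic updates of the
 * shared counters on every fault; sync_mm_rss() folds them back in.
 */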
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

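/*
 * free_pmd_range(), free_pud_range() and free_p4d_range() mirror
 * free_pte_range() one level up each: free every lower-level table in
 * [addr, end), then drop this level's own table as well if the
 * floor/ceiling limits show no neighbouring vma can still be using it.
 */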
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE,
	 * (see pte_free_tlb()), flush the tlb if we need
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

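/*
 * Free the page tables backing a list of vmas.  Each vma is unlinked
 * from its anon_vma chains and file mapping first, and nearby
 * non-hugetlb vmas are batched into a single free_pgd_range() call.
 */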
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		vm_write_begin(vma);
		unlink_anon_vmas(vma);
		vm_write_end(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				vm_write_begin(vma);
				unlink_anon_vmas(vma);
				vm_write_end(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}

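/*
 * Allocate a new pte page table for @pmd.  The pmd is rechecked under
 * its lock because another thread may have populated it concurrently;
 * if so, the freshly allocated page is simply freed again.
 */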
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_rmb() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);
	if (new)
		pte_free(mm, new);
	return 0;
}

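/* Same as __pte_alloc(), but for kernel mappings in init_mm. */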
int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

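/*
 * The page table walkers below batch their rss updates in a local
 * rss[] vector and flush it to the mm counters once per walked range.
 */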
static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->readpage : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/*
	 * There is no pmd_special() but there may be special pmds, e.g.
	 * in a direct-access (dax) mapping, so let's just replicate the
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
	 */
	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
#endif

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

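/*
 * Copy a non-present pte (a swap, migration or device-private entry)
 * from the parent into the child at fork.  Returns 0 on success, or
 * the swap entry value when the caller must first allocate a swap
 * count continuation and then retry.
 */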
static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return entry.val;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		page = migration_entry_to_page(entry);

		rss[mm_counter(page)]++;

		if (is_write_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both
			 * parent and child to be set to read.
			 */
			make_migration_entry_read(&entry);
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*src_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = device_private_entry_to_page(entry);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		get_page(page);
		rss[mm_counter(page)]++;
		page_dup_rmap(page, false);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_write_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			make_device_private_entry_read(&entry);
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	}
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page if necessary.
 *
 * NOTE! The usual case is that this doesn't need to do
 * anything, and can just return a positive value. That
 * will let the caller know that it can just increase
 * the page refcount and re-use the pte the traditional
 * way.
 *
 * But _if_ we need to copy it because it needs to be
 * pinned in the parent (and the child should get its own
 * copy rather than just a reference to the same page),
 * we'll do that here and return zero to let the caller
 * know we're done.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct page **prealloc, pte_t pte, struct page *page)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct page *new_page;

	if (!is_cow_mapping(src_vma->vm_flags))
		return 1;

	/*
	 * What we want to do is to check whether this page may
	 * have been pinned by the parent process. If so,
	 * instead of wrprotect the pte on both sides, we copy
	 * the page immediately so that we'll always guarantee
	 * the pinned page won't be randomly replaced in the
	 * future.
	 *
	 * The page pinning checks are just "has this mm ever
	 * seen pinning", along with the (inexact) check of
	 * the page count. That might give false positives
	 * for pinning, but it will work correctly.
Peter Xu70e806e2020-09-25 18:25:59 -0400836 */
837 if (likely(!atomic_read(&src_mm->has_pinned)))
838 return 1;
839 if (likely(!page_maybe_dma_pinned(page)))
840 return 1;
841
Peter Xu70e806e2020-09-25 18:25:59 -0400842 new_page = *prealloc;
843 if (!new_page)
844 return -EAGAIN;
845
846 /*
847 * We have a prealloc page, all good! Take it
848 * over and copy the page & arm it.
849 */
850 *prealloc = NULL;
Peter Xuc78f4632020-10-13 16:54:21 -0700851 copy_user_highpage(new_page, page, addr, src_vma);
Peter Xu70e806e2020-09-25 18:25:59 -0400852 __SetPageUptodate(new_page);
Peter Xuc78f4632020-10-13 16:54:21 -0700853 page_add_new_anon_rmap(new_page, dst_vma, addr, false);
854 lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
Peter Xu70e806e2020-09-25 18:25:59 -0400855 rss[mm_counter(new_page)]++;
856
857 /* All done, just insert the new page copy in the child */
Peter Xuc78f4632020-10-13 16:54:21 -0700858 pte = mk_pte(new_page, dst_vma->vm_page_prot);
Laurent Dufour32507b62018-04-17 16:33:18 +0200859 pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma->vm_flags);
Peter Xuc78f4632020-10-13 16:54:21 -0700860 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
Peter Xu70e806e2020-09-25 18:25:59 -0400861 return 0;
862}
863
864/*
865 * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page
866 * is required to copy this pte.
867 */
868static inline int
Peter Xuc78f4632020-10-13 16:54:21 -0700869copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
870 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
871 struct page **prealloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700872{
Peter Xuc78f4632020-10-13 16:54:21 -0700873 struct mm_struct *src_mm = src_vma->vm_mm;
874 unsigned long vm_flags = src_vma->vm_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 pte_t pte = *src_pte;
876 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877
Peter Xuc78f4632020-10-13 16:54:21 -0700878 page = vm_normal_page(src_vma, addr, pte);
Peter Xu70e806e2020-09-25 18:25:59 -0400879 if (page) {
880 int retval;
881
Peter Xuc78f4632020-10-13 16:54:21 -0700882 retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
883 addr, rss, prealloc, pte, page);
Peter Xu70e806e2020-09-25 18:25:59 -0400884 if (retval <= 0)
885 return retval;
886
887 get_page(page);
888 page_dup_rmap(page, false);
889 rss[mm_counter(page)]++;
890 }
891
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892 /*
893 * If it's a COW mapping, write protect it both
894 * in the parent and the child
895 */
Linus Torvalds1b2de5d2018-07-09 13:19:49 -0700896 if (is_cow_mapping(vm_flags) && pte_write(pte)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897 ptep_set_wrprotect(src_mm, addr, src_pte);
Zachary Amsden3dc90792006-09-30 23:29:30 -0700898 pte = pte_wrprotect(pte);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 }
900
901 /*
902 * If it's a shared mapping, mark it clean in
903 * the child
904 */
905 if (vm_flags & VM_SHARED)
906 pte = pte_mkclean(pte);
907 pte = pte_mkold(pte);
Linus Torvalds6aab3412005-11-28 14:34:23 -0800908
Peter Xub569a172020-04-06 20:05:53 -0700909 /*
910 * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
911 * does not have the VM_UFFD_WP, which means that the uffd
912 * fork event is not enabled.
913 */
914 if (!(vm_flags & VM_UFFD_WP))
915 pte = pte_clear_uffd_wp(pte);
916
Peter Xuc78f4632020-10-13 16:54:21 -0700917 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
Peter Xu70e806e2020-09-25 18:25:59 -0400918 return 0;
919}
920
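/*
 * Pre-allocate and charge a destination page outside the page table
 * locks, for copy_present_page() to consume when a possibly pinned
 * page has to be copied rather than shared.
 */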
static inline struct page *
page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
		   unsigned long addr)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
	if (!new_page)
		return NULL;

	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
		put_page(new_page);
		return NULL;
	}
	cgroup_throttle_swaprate(new_page, GFP_KERNEL);

	return new_page;
}

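/*
 * Copy one pte page worth of mappings at fork.  Both page table locks
 * are dropped periodically so the loop stays preemptible, and the walk
 * is restarted after allocating a swap count continuation or a
 * pre-allocated page when either is needed.
 */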
static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct page *prealloc = NULL;

again:
	progress = 0;
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}
	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(*src_pte))) {
			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
							dst_pte, src_pte,
							src_vma, addr, rss);
			if (entry.val)
				break;
			progress += 8;
			continue;
		}
		/* copy_present_pte() will clear `*prealloc' if consumed */
		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
				       addr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 */
		if (unlikely(ret == -EAGAIN))
			break;
		if (unlikely(prealloc)) {
			/*
			 * pre-alloc page cannot be reused by next time so as
			 * to strictly follow mempolicy (e.g., alloc_page_vma()
			 * will allocate page according to address).  This
			 * could only happen if one pinned pte changed.
			 */
			put_page(prealloc);
			prealloc = NULL;
		}
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (entry.val) {
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret) {
		WARN_ON_ONCE(ret != -EAGAIN);
		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
		if (!prealloc)
			return -ENOMEM;
		/* We've captured and resolved the error. Reset, try again. */
		ret = 0;
	}
	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		put_page(prealloc);
	return ret;
}

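/*
 * copy_pmd_range()/copy_pud_range()/copy_p4d_range() walk one page
 * table level each; the pmd and pud walkers hand huge entries to
 * copy_huge_pmd()/copy_huge_pud() and fall through to the next lower
 * level otherwise.
 */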
static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm,
					    dst_pmd, src_pmd, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

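/*
 * Duplicate the page table contents of @src_vma into @dst_vma at fork.
 * Mappings that the first page fault can refill are skipped entirely;
 * COW copies run inside an mmu notifier range and under the
 * write_protect_seq sequence count of the source mm.
 */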
Peter Xuc78f4632020-10-13 16:54:21 -07001144int
1145copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146{
1147 pgd_t *src_pgd, *dst_pgd;
1148 unsigned long next;
Peter Xuc78f4632020-10-13 16:54:21 -07001149 unsigned long addr = src_vma->vm_start;
1150 unsigned long end = src_vma->vm_end;
1151 struct mm_struct *dst_mm = dst_vma->vm_mm;
1152 struct mm_struct *src_mm = src_vma->vm_mm;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001153 struct mmu_notifier_range range;
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001154 bool is_cow;
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001155 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
Nick Piggind9928952005-08-28 16:49:11 +10001157 /*
1158 * Don't copy ptes where a page fault will fill them correctly.
1159 * Fork becomes much lighter when there are big shared or private
1160 * readonly mappings. The tradeoff is that copy_page_range is more
1161 * efficient than faulting.
1162 */
Peter Xuc78f4632020-10-13 16:54:21 -07001163 if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1164 !src_vma->anon_vma)
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08001165 return 0;
Nick Piggind9928952005-08-28 16:49:11 +10001166
Peter Xuc78f4632020-10-13 16:54:21 -07001167 if (is_vm_hugetlb_page(src_vma))
1168 return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
Peter Xuc78f4632020-10-13 16:54:21 -07001170 if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
venkatesh.pallipadi@intel.com2ab64032008-12-18 11:41:29 -08001171 /*
1172 * We do not free on error cases below as remove_vma
1173 * gets called on error from higher level routine
1174 */
Peter Xuc78f4632020-10-13 16:54:21 -07001175 ret = track_pfn_copy(src_vma);
venkatesh.pallipadi@intel.com2ab64032008-12-18 11:41:29 -08001176 if (ret)
1177 return ret;
1178 }
1179
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001180 /*
1181 * We need to invalidate the secondary MMU mappings only when
1182 * there could be a permission downgrade on the ptes of the
1183 * parent mm. And a permission downgrade will only happen if
1184 * is_cow_mapping() returns true.
1185 */
Peter Xuc78f4632020-10-13 16:54:21 -07001186 is_cow = is_cow_mapping(src_vma->vm_flags);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001187
1188 if (is_cow) {
Jérôme Glisse7269f992019-05-13 17:20:53 -07001189 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
Peter Xuc78f4632020-10-13 16:54:21 -07001190 0, src_vma, src_mm, addr, end);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001191 mmu_notifier_invalidate_range_start(&range);
Jason Gunthorpe53794652020-12-14 19:05:44 -08001192 /*
1193 * Disabling preemption is not needed for the write side, as
1194 * the read side doesn't spin, but goes to the mmap_lock.
1195 *
1196 * Use the raw variant of the seqcount_t write API to avoid
1197 * lockdep complaining about preemptibility.
1198 */
1199 mmap_assert_write_locked(src_mm);
1200 raw_write_seqcount_begin(&src_mm->write_protect_seq);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001201 }
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001202
1203 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 dst_pgd = pgd_offset(dst_mm, addr);
1205 src_pgd = pgd_offset(src_mm, addr);
1206 do {
1207 next = pgd_addr_end(addr, end);
1208 if (pgd_none_or_clear_bad(src_pgd))
1209 continue;
Peter Xuc78f4632020-10-13 16:54:21 -07001210 if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1211 addr, next))) {
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001212 ret = -ENOMEM;
1213 break;
1214 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 } while (dst_pgd++, src_pgd++, addr = next, addr != end);
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001216
Jason Gunthorpe53794652020-12-14 19:05:44 -08001217 if (is_cow) {
1218 raw_write_seqcount_end(&src_mm->write_protect_seq);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001219 mmu_notifier_invalidate_range_end(&range);
Jason Gunthorpe53794652020-12-14 19:05:44 -08001220 }
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001221 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222}
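/*
 * Illustrative sketch (not part of memory.c): the write_protect_seq sequence
 * count taken above pairs with a lockless reader such as
 * get_user_pages_fast(). A reader-side pattern might look like the following;
 * the real implementation lives in mm/gup.c and may differ in detail.
 */
#if 0
static bool lockless_walk_example(struct mm_struct *mm)
{
	unsigned int seq;

	seq = raw_read_seqcount(&mm->write_protect_seq);
	if (seq & 1)
		return false;	/* a fork is copying page tables right now */

	/* ... walk the page tables without taking mmap_lock ... */

	if (read_seqcount_retry(&mm->write_protect_seq, seq))
		return false;	/* raced with fork; caller should fall back */
	return true;
}
#endif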
1223
Robin Holt51c6f662005-11-13 16:06:42 -08001224static unsigned long zap_pte_range(struct mmu_gather *tlb,
Nick Pigginb5810032005-10-29 18:16:12 -07001225 struct vm_area_struct *vma, pmd_t *pmd,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 unsigned long addr, unsigned long end,
Peter Zijlstra97a89412011-05-24 17:12:04 -07001227 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228{
Nick Pigginb5810032005-10-29 18:16:12 -07001229 struct mm_struct *mm = tlb->mm;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001230 int force_flush = 0;
KAMEZAWA Hiroyukid559db02010-03-05 13:41:39 -08001231 int rss[NR_MM_COUNTERS];
Peter Zijlstra97a89412011-05-24 17:12:04 -07001232 spinlock_t *ptl;
Steven Rostedt5f1a1902011-06-15 15:08:23 -07001233 pte_t *start_pte;
Peter Zijlstra97a89412011-05-24 17:12:04 -07001234 pte_t *pte;
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001235 swp_entry_t entry;
KAMEZAWA Hiroyukid559db02010-03-05 13:41:39 -08001236
Peter Zijlstraed6a7932018-08-31 14:46:08 +02001237 tlb_change_page_size(tlb, PAGE_SIZE);
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001238again:
Peter Zijlstrae3032972011-05-24 17:12:01 -07001239 init_rss_vec(rss);
Steven Rostedt5f1a1902011-06-15 15:08:23 -07001240 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1241 pte = start_pte;
Mel Gorman3ea27712017-08-02 13:31:52 -07001242 flush_tlb_batched_pending(mm);
Zachary Amsden6606c3e2006-09-30 23:29:33 -07001243 arch_enter_lazy_mmu_mode();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 do {
1245 pte_t ptent = *pte;
Tobin C Harding166f61b2017-02-24 14:59:01 -08001246 if (pte_none(ptent))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 continue;
Hugh Dickins6f5e6b92006-03-16 23:04:09 -08001248
Minchan Kim7b167b62019-09-24 00:02:24 +00001249 if (need_resched())
1250 break;
1251
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 if (pte_present(ptent)) {
Hugh Dickinsee498ed2005-11-21 21:32:18 -08001253 struct page *page;
Robin Holt51c6f662005-11-13 16:06:42 -08001254
Christoph Hellwig25b29952019-06-13 22:50:49 +02001255 page = vm_normal_page(vma, addr, ptent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 if (unlikely(details) && page) {
1257 /*
1258 * unmap_shared_mapping_pages() wants to
1259 * invalidate cache without truncating:
1260 * unmap shared but keep private pages.
1261 */
1262 if (details->check_mapping &&
Kirill A. Shutemov800d8c62016-07-26 15:26:18 -07001263 details->check_mapping != page_rmapping(page))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 }
Nick Pigginb5810032005-10-29 18:16:12 -07001266 ptent = ptep_get_and_clear_full(mm, addr, pte,
Zachary Amsdena6003882005-09-03 15:55:04 -07001267 tlb->fullmm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 tlb_remove_tlb_entry(tlb, pte, addr);
1269 if (unlikely(!page))
1270 continue;
Jerome Marchandeca56ff2016-01-14 15:19:26 -08001271
1272 if (!PageAnon(page)) {
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001273 if (pte_dirty(ptent)) {
1274 force_flush = 1;
Hugh Dickins6237bcd2005-10-29 18:15:54 -07001275 set_page_dirty(page);
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001276 }
Johannes Weiner4917e5d2009-01-06 14:39:17 -08001277 if (pte_young(ptent) &&
Joe Perches64363aa2013-07-08 16:00:18 -07001278 likely(!(vma->vm_flags & VM_SEQ_READ)))
Nick Pigginbf3f3bc2009-01-06 14:38:55 -08001279 mark_page_accessed(page);
Hugh Dickins6237bcd2005-10-29 18:15:54 -07001280 }
Jerome Marchandeca56ff2016-01-14 15:19:26 -08001281 rss[mm_counter(page)]--;
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001282 page_remove_rmap(page, false);
Hugh Dickins3dc14742009-01-06 14:40:08 -08001283 if (unlikely(page_mapcount(page) < 0))
1284 print_bad_pte(vma, addr, ptent, page);
Aneesh Kumar K.Ve9d55e12016-07-26 15:24:09 -07001285 if (unlikely(__tlb_remove_page(tlb, page))) {
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001286 force_flush = 1;
Will Deaconce9ec372014-10-28 13:16:28 -07001287 addr += PAGE_SIZE;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001288 break;
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001289 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 continue;
1291 }
Jérôme Glisse5042db42017-09-08 16:11:43 -07001292
1293 entry = pte_to_swp_entry(ptent);
Ralph Campbell463b7a12020-08-06 23:22:21 -07001294 if (is_device_private_entry(entry)) {
Jérôme Glisse5042db42017-09-08 16:11:43 -07001295 struct page *page = device_private_entry_to_page(entry);
1296
1297 if (unlikely(details && details->check_mapping)) {
1298 /*
1299 * unmap_shared_mapping_pages() wants to
1300 * invalidate cache without truncating:
1301 * unmap shared but keep private pages.
1302 */
1303 if (details->check_mapping !=
1304 page_rmapping(page))
1305 continue;
1306 }
1307
1308 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1309 rss[mm_counter(page)]--;
1310 page_remove_rmap(page, false);
1311 put_page(page);
1312 continue;
1313 }
1314
Kirill A. Shutemov3e8715f2017-02-22 15:46:34 -08001315 /* If details->check_mapping, we leave swap entries. */
1316 if (unlikely(details))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 continue;
KAMEZAWA Hiroyukib084d432010-03-05 13:41:42 -08001318
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001319 if (!non_swap_entry(entry))
1320 rss[MM_SWAPENTS]--;
1321 else if (is_migration_entry(entry)) {
1322 struct page *page;
Konstantin Khlebnikov9f9f1ac2012-01-20 14:34:24 -08001323
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001324 page = migration_entry_to_page(entry);
Jerome Marchandeca56ff2016-01-14 15:19:26 -08001325 rss[mm_counter(page)]--;
KAMEZAWA Hiroyukib084d432010-03-05 13:41:42 -08001326 }
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001327 if (unlikely(!free_swap_and_cache(entry)))
1328 print_bad_pte(vma, addr, ptent, NULL);
Zachary Amsden9888a1c2006-09-30 23:29:31 -07001329 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001330 } while (pte++, addr += PAGE_SIZE, addr != end);
Hugh Dickinsae859762005-10-29 18:16:05 -07001331
KAMEZAWA Hiroyukid559db02010-03-05 13:41:39 -08001332 add_mm_rss_vec(mm, rss);
Zachary Amsden6606c3e2006-09-30 23:29:33 -07001333 arch_leave_lazy_mmu_mode();
Robin Holt51c6f662005-11-13 16:06:42 -08001334
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001335 /* Do the actual TLB flush before dropping ptl */
Will Deaconfb7332a2014-10-29 10:03:09 +00001336 if (force_flush)
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001337 tlb_flush_mmu_tlbonly(tlb);
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001338 pte_unmap_unlock(start_pte, ptl);
1339
1340 /*
1341 * If we forced a TLB flush (either due to running out of
1342 * batch buffers or because we needed to flush dirty TLB
1343 * entries before releasing the ptl), free the batched
1344 * memory too. Restart if we didn't do everything.
1345 */
1346 if (force_flush) {
1347 force_flush = 0;
Peter Zijlstrafa0aafb2018-09-20 10:54:04 +02001348 tlb_flush_mmu(tlb);
Minchan Kim7b167b62019-09-24 00:02:24 +00001349 }
1350
1351 if (addr != end) {
1352 cond_resched();
1353 goto again;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001354 }
1355
Robin Holt51c6f662005-11-13 16:06:42 -08001356 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357}
1358
Robin Holt51c6f662005-11-13 16:06:42 -08001359static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
Nick Pigginb5810032005-10-29 18:16:12 -07001360 struct vm_area_struct *vma, pud_t *pud,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 unsigned long addr, unsigned long end,
Peter Zijlstra97a89412011-05-24 17:12:04 -07001362 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363{
1364 pmd_t *pmd;
1365 unsigned long next;
1366
1367 pmd = pmd_offset(pud, addr);
1368 do {
1369 next = pmd_addr_end(addr, end);
Zi Yan84c3fc42017-09-08 16:11:01 -07001370 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
Hugh Dickins53406ed2018-08-01 11:31:52 -07001371 if (next - addr != HPAGE_PMD_SIZE)
David Rientjesfd607752016-12-12 16:42:20 -08001372 __split_huge_pmd(vma, pmd, addr, false, NULL);
Hugh Dickins53406ed2018-08-01 11:31:52 -07001373 else if (zap_huge_pmd(tlb, vma, pmd, addr))
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001374 goto next;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001375 /* fall through */
1376 }
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001377 /*
1378 * Here there can be other concurrent MADV_DONTNEED or
1379 * trans huge page faults running, and if the pmd is
1380 * none or trans huge it can change under us. This is
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001381 * because MADV_DONTNEED holds the mmap_lock in read
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001382 * mode.
1383 */
1384 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1385 goto next;
Peter Zijlstra97a89412011-05-24 17:12:04 -07001386 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001387next:
Peter Zijlstra97a89412011-05-24 17:12:04 -07001388 cond_resched();
1389 } while (pmd++, addr = next, addr != end);
Robin Holt51c6f662005-11-13 16:06:42 -08001390
1391 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392}
1393
Robin Holt51c6f662005-11-13 16:06:42 -08001394static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001395 struct vm_area_struct *vma, p4d_t *p4d,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 unsigned long addr, unsigned long end,
Peter Zijlstra97a89412011-05-24 17:12:04 -07001397 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398{
1399 pud_t *pud;
1400 unsigned long next;
1401
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001402 pud = pud_offset(p4d, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 do {
1404 next = pud_addr_end(addr, end);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001405 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1406 if (next - addr != HPAGE_PUD_SIZE) {
Michel Lespinasse42fc5412020-06-08 21:33:44 -07001407 mmap_assert_locked(tlb->mm);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001408 split_huge_pud(vma, pud, addr);
1409 } else if (zap_huge_pud(tlb, vma, pud, addr))
1410 goto next;
1411 /* fall through */
1412 }
Peter Zijlstra97a89412011-05-24 17:12:04 -07001413 if (pud_none_or_clear_bad(pud))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 continue;
Peter Zijlstra97a89412011-05-24 17:12:04 -07001415 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001416next:
1417 cond_resched();
Peter Zijlstra97a89412011-05-24 17:12:04 -07001418 } while (pud++, addr = next, addr != end);
Robin Holt51c6f662005-11-13 16:06:42 -08001419
1420 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421}
1422
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001423static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1424 struct vm_area_struct *vma, pgd_t *pgd,
1425 unsigned long addr, unsigned long end,
1426 struct zap_details *details)
1427{
1428 p4d_t *p4d;
1429 unsigned long next;
1430
1431 p4d = p4d_offset(pgd, addr);
1432 do {
1433 next = p4d_addr_end(addr, end);
1434 if (p4d_none_or_clear_bad(p4d))
1435 continue;
1436 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1437 } while (p4d++, addr = next, addr != end);
1438
1439 return addr;
1440}
1441
Michal Hockoaac45362016-03-25 14:20:24 -07001442void unmap_page_range(struct mmu_gather *tlb,
Al Viro038c7aa2012-03-05 13:25:09 -05001443 struct vm_area_struct *vma,
1444 unsigned long addr, unsigned long end,
1445 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446{
1447 pgd_t *pgd;
1448 unsigned long next;
1449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 BUG_ON(addr >= end);
Peter Zijlstra2ce6b112018-04-17 16:33:14 +02001451 vm_write_begin(vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 tlb_start_vma(tlb, vma);
1453 pgd = pgd_offset(vma->vm_mm, addr);
1454 do {
1455 next = pgd_addr_end(addr, end);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001456 if (pgd_none_or_clear_bad(pgd))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 continue;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001458 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001459 } while (pgd++, addr = next, addr != end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 tlb_end_vma(tlb, vma);
Peter Zijlstra2ce6b112018-04-17 16:33:14 +02001461 vm_write_end(vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
Al Virof5cc4ee2012-03-05 14:14:20 -05001464
1465static void unmap_single_vma(struct mmu_gather *tlb,
1466 struct vm_area_struct *vma, unsigned long start_addr,
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001467 unsigned long end_addr,
Al Virof5cc4ee2012-03-05 14:14:20 -05001468 struct zap_details *details)
1469{
1470 unsigned long start = max(vma->vm_start, start_addr);
1471 unsigned long end;
1472
1473 if (start >= vma->vm_end)
1474 return;
1475 end = min(vma->vm_end, end_addr);
1476 if (end <= vma->vm_start)
1477 return;
1478
Srikar Dronamrajucbc91f72012-04-11 16:05:27 +05301479 if (vma->vm_file)
1480 uprobe_munmap(vma, start, end);
1481
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07001482 if (unlikely(vma->vm_flags & VM_PFNMAP))
Suresh Siddha5180da42012-10-08 16:28:29 -07001483 untrack_pfn(vma, 0, 0);
Al Virof5cc4ee2012-03-05 14:14:20 -05001484
1485 if (start != end) {
1486 if (unlikely(is_vm_hugetlb_page(vma))) {
1487 /*
1488 * It is undesirable to test vma->vm_file as it
1489 * should be non-null for valid hugetlb area.
1490 * However, vm_file will be NULL in the error
Davidlohr Bueso7aa6b4a2014-04-07 15:37:01 -07001491 * cleanup path of mmap_region. When
Al Virof5cc4ee2012-03-05 14:14:20 -05001492 * hugetlbfs ->mmap method fails,
Davidlohr Bueso7aa6b4a2014-04-07 15:37:01 -07001493 * mmap_region() nullifies vma->vm_file
Al Virof5cc4ee2012-03-05 14:14:20 -05001494 * before calling this function to clean up.
1495 * Since no pte has actually been setup, it is
1496 * safe to do nothing in this case.
1497 */
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07001498 if (vma->vm_file) {
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08001499 i_mmap_lock_write(vma->vm_file->f_mapping);
Mel Gormand8333522012-07-31 16:46:20 -07001500 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08001501 i_mmap_unlock_write(vma->vm_file->f_mapping);
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07001502 }
Al Virof5cc4ee2012-03-05 14:14:20 -05001503 } else
1504 unmap_page_range(tlb, vma, start, end, details);
1505 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506}
1507
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508/**
1509 * unmap_vmas - unmap a range of memory covered by a list of vma's
Randy Dunlap0164f692011-06-15 15:08:09 -07001510 * @tlb: address of the caller's struct mmu_gather
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 * @vma: the starting vma
1512 * @start_addr: virtual address at which to start unmapping
1513 * @end_addr: virtual address at which to end unmapping
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 *
Hugh Dickins508034a2005-10-29 18:16:30 -07001515 * Unmap all pages in the vma list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 * Only addresses between `start' and `end' will be unmapped.
1518 *
1519 * The VMA list must be sorted in ascending virtual address order.
1520 *
1521 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1522 * range after unmap_vmas() returns. So the only responsibility here is to
1523 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1524 * drops the lock and schedules.
1525 */
Al Viro6e8bb012012-03-05 13:41:15 -05001526void unmap_vmas(struct mmu_gather *tlb,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 struct vm_area_struct *vma, unsigned long start_addr,
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001528 unsigned long end_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001530 struct mmu_notifier_range range;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001532 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1533 start_addr, end_addr);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001534 mmu_notifier_invalidate_range_start(&range);
Al Virof5cc4ee2012-03-05 14:14:20 -05001535 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001536 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001537 mmu_notifier_invalidate_range_end(&range);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538}
1539
1540/**
1541 * zap_page_range - remove user pages in a given range
1542 * @vma: vm_area_struct holding the applicable pages
Randy Dunlapeb4546b2012-06-20 12:53:02 -07001543 * @start: starting address of pages to zap
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 * @size: number of bytes to zap
Al Virof5cc4ee2012-03-05 14:14:20 -05001545 *
1546 * Caller must protect the VMA list
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 */
Linus Torvalds7e027b12012-05-06 13:43:15 -07001548void zap_page_range(struct vm_area_struct *vma, unsigned long start,
Kirill A. Shutemovecf13852017-02-22 15:46:37 -08001549 unsigned long size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001551 struct mmu_notifier_range range;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001552 struct mmu_gather tlb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 lru_add_drain();
Jérôme Glisse7269f992019-05-13 17:20:53 -07001555 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001556 start, start + size);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001557 tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1558 update_hiwater_rss(vma->vm_mm);
1559 mmu_notifier_invalidate_range_start(&range);
1560 for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1561 unmap_single_vma(&tlb, vma, start, range.end, NULL);
1562 mmu_notifier_invalidate_range_end(&range);
1563 tlb_finish_mmu(&tlb, start, range.end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564}
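/*
 * Illustrative sketch (not part of memory.c): zap_page_range() is an
 * in-kernel interface (used, for example, on the MADV_DONTNEED path); callers
 * typically hold the mmap_lock of the mm owning @vma. The function name
 * below is hypothetical.
 */
#if 0
static void my_discard_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	mmap_assert_locked(vma->vm_mm);
	zap_page_range(vma, start, end - start);
}
#endif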
1565
Jack Steinerc627f9c2008-07-29 22:33:53 -07001566/**
Al Virof5cc4ee2012-03-05 14:14:20 -05001567 * zap_page_range_single - remove user pages in a given range
1568 * @vma: vm_area_struct holding the applicable pages
1569 * @address: starting address of pages to zap
1570 * @size: number of bytes to zap
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001571 * @details: details of shared cache invalidation
Al Virof5cc4ee2012-03-05 14:14:20 -05001572 *
1573 * The range must fit into one VMA.
1574 */
1575static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1576 unsigned long size, struct zap_details *details)
1577{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001578 struct mmu_notifier_range range;
Al Virof5cc4ee2012-03-05 14:14:20 -05001579 struct mmu_gather tlb;
Al Virof5cc4ee2012-03-05 14:14:20 -05001580
1581 lru_add_drain();
Jérôme Glisse7269f992019-05-13 17:20:53 -07001582 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001583 address, address + size);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001584 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1585 update_hiwater_rss(vma->vm_mm);
1586 mmu_notifier_invalidate_range_start(&range);
1587 unmap_single_vma(&tlb, vma, address, range.end, details);
1588 mmu_notifier_invalidate_range_end(&range);
1589 tlb_finish_mmu(&tlb, address, range.end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590}
1591
Jack Steinerc627f9c2008-07-29 22:33:53 -07001592/**
1593 * zap_vma_ptes - remove ptes mapping the vma
1594 * @vma: vm_area_struct holding ptes to be zapped
1595 * @address: starting address of pages to zap
1596 * @size: number of bytes to zap
1597 *
1598 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1599 *
1600 * The entire address range must be fully contained within the vma.
1601 *
Jack Steinerc627f9c2008-07-29 22:33:53 -07001602 */
Leon Romanovsky27d036e2018-05-29 15:14:07 +03001603void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
Jack Steinerc627f9c2008-07-29 22:33:53 -07001604 unsigned long size)
1605{
1606 if (address < vma->vm_start || address + size > vma->vm_end ||
1607 !(vma->vm_flags & VM_PFNMAP))
Leon Romanovsky27d036e2018-05-29 15:14:07 +03001608 return;
1609
Al Virof5cc4ee2012-03-05 14:14:20 -05001610 zap_page_range_single(vma, address, size, NULL);
Jack Steinerc627f9c2008-07-29 22:33:53 -07001611}
1612EXPORT_SYMBOL_GPL(zap_vma_ptes);
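/*
 * Illustrative sketch (not part of memory.c): a driver revoking the PTEs it
 * previously installed in one of its VM_PFNMAP mappings, for example before
 * releasing the backing device memory. The function name is hypothetical.
 */
#if 0
static void my_drv_revoke_mapping(struct vm_area_struct *vma)
{
	/* The range must lie entirely within the vma, as noted above. */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
#endif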
1613
Arjun Roy8cd39842020-04-10 14:33:01 -07001614static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001615{
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001616 pgd_t *pgd;
1617 p4d_t *p4d;
1618 pud_t *pud;
1619 pmd_t *pmd;
1620
1621 pgd = pgd_offset(mm, addr);
1622 p4d = p4d_alloc(mm, pgd, addr);
1623 if (!p4d)
1624 return NULL;
1625 pud = pud_alloc(mm, p4d, addr);
1626 if (!pud)
1627 return NULL;
1628 pmd = pmd_alloc(mm, pud, addr);
1629 if (!pmd)
1630 return NULL;
1631
1632 VM_BUG_ON(pmd_trans_huge(*pmd));
Arjun Roy8cd39842020-04-10 14:33:01 -07001633 return pmd;
1634}
1635
1636pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1637 spinlock_t **ptl)
1638{
1639 pmd_t *pmd = walk_to_pmd(mm, addr);
1640
1641 if (!pmd)
1642 return NULL;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001643 return pte_alloc_map_lock(mm, pmd, addr, ptl);
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001644}
1645
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001646static int validate_page_before_insert(struct page *page)
1647{
1648 if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1649 return -EINVAL;
1650 flush_dcache_page(page);
1651 return 0;
1652}
1653
1654static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1655 unsigned long addr, struct page *page, pgprot_t prot)
1656{
1657 if (!pte_none(*pte))
1658 return -EBUSY;
1659 /* Ok, finally just insert the thing.. */
1660 get_page(page);
1661 inc_mm_counter_fast(mm, mm_counter_file(page));
1662 page_add_file_rmap(page, false);
1663 set_pte_at(mm, addr, pte, mk_pte(page, prot));
1664 return 0;
1665}
1666
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667/*
Linus Torvalds238f58d2005-11-29 13:01:56 -08001668 * This is the old fallback for page remapping.
1669 *
1670 * For historical reasons, it only allows reserved pages. Only
1671 * old drivers should use this, and they needed to mark their
1672 * pages reserved for the old functions anyway.
1673 */
Nick Piggin423bad602008-04-28 02:13:01 -07001674static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1675 struct page *page, pgprot_t prot)
Linus Torvalds238f58d2005-11-29 13:01:56 -08001676{
Nick Piggin423bad602008-04-28 02:13:01 -07001677 struct mm_struct *mm = vma->vm_mm;
Linus Torvalds238f58d2005-11-29 13:01:56 -08001678 int retval;
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001679 pte_t *pte;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001680 spinlock_t *ptl;
1681
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001682 retval = validate_page_before_insert(page);
1683 if (retval)
KAMEZAWA Hiroyuki5b4e6552008-10-18 20:28:10 -07001684 goto out;
Linus Torvalds238f58d2005-11-29 13:01:56 -08001685 retval = -ENOMEM;
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001686 pte = get_locked_pte(mm, addr, &ptl);
Linus Torvalds238f58d2005-11-29 13:01:56 -08001687 if (!pte)
KAMEZAWA Hiroyuki5b4e6552008-10-18 20:28:10 -07001688 goto out;
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001689 retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
Linus Torvalds238f58d2005-11-29 13:01:56 -08001690 pte_unmap_unlock(pte, ptl);
1691out:
1692 return retval;
1693}
1694
Arjun Roy8cd39842020-04-10 14:33:01 -07001695#ifdef pte_index
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001696static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
Arjun Roy8cd39842020-04-10 14:33:01 -07001697 unsigned long addr, struct page *page, pgprot_t prot)
1698{
1699 int err;
1700
1701 if (!page_count(page))
1702 return -EINVAL;
1703 err = validate_page_before_insert(page);
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001704 if (err)
1705 return err;
1706 return insert_page_into_pte_locked(mm, pte, addr, page, prot);
Arjun Roy8cd39842020-04-10 14:33:01 -07001707}
1708
1709/* insert_pages() amortizes the cost of spinlock operations
1710 * when inserting pages in a loop. Arch *must* define pte_index.
1711 */
1712static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1713 struct page **pages, unsigned long *num, pgprot_t prot)
1714{
1715 pmd_t *pmd = NULL;
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001716 pte_t *start_pte, *pte;
1717 spinlock_t *pte_lock;
Arjun Roy8cd39842020-04-10 14:33:01 -07001718 struct mm_struct *const mm = vma->vm_mm;
1719 unsigned long curr_page_idx = 0;
1720 unsigned long remaining_pages_total = *num;
1721 unsigned long pages_to_write_in_pmd;
1722 int ret;
1723more:
1724 ret = -EFAULT;
1725 pmd = walk_to_pmd(mm, addr);
1726 if (!pmd)
1727 goto out;
1728
1729 pages_to_write_in_pmd = min_t(unsigned long,
1730 remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1731
1732 /* Allocate the PTE if necessary; takes PMD lock once only. */
1733 ret = -ENOMEM;
1734 if (pte_alloc(mm, pmd))
1735 goto out;
Arjun Roy8cd39842020-04-10 14:33:01 -07001736
1737 while (pages_to_write_in_pmd) {
1738 int pte_idx = 0;
1739 const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1740
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001741 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1742 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1743 int err = insert_page_in_batch_locked(mm, pte,
Arjun Roy8cd39842020-04-10 14:33:01 -07001744 addr, pages[curr_page_idx], prot);
1745 if (unlikely(err)) {
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001746 pte_unmap_unlock(start_pte, pte_lock);
Arjun Roy8cd39842020-04-10 14:33:01 -07001747 ret = err;
1748 remaining_pages_total -= pte_idx;
1749 goto out;
1750 }
1751 addr += PAGE_SIZE;
1752 ++curr_page_idx;
1753 }
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001754 pte_unmap_unlock(start_pte, pte_lock);
Arjun Roy8cd39842020-04-10 14:33:01 -07001755 pages_to_write_in_pmd -= batch_size;
1756 remaining_pages_total -= batch_size;
1757 }
1758 if (remaining_pages_total)
1759 goto more;
1760 ret = 0;
1761out:
1762 *num = remaining_pages_total;
1763 return ret;
1764}
1765#endif /* ifdef pte_index */
1766
1767/**
1768 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1769 * @vma: user vma to map to
1770 * @addr: target start user address of these pages
1771 * @pages: source kernel pages
1772 * @num: in: number of pages to map. out: number of pages that were *not*
1773 * mapped. (0 means all pages were successfully mapped).
1774 *
1775 * Preferred over vm_insert_page() when inserting multiple pages.
1776 *
1777 * In case of error, we may have mapped a subset of the provided
1778 * pages. It is the caller's responsibility to account for this case.
1779 *
1780 * The same restrictions apply as in vm_insert_page().
1781 */
1782int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1783 struct page **pages, unsigned long *num)
1784{
1785#ifdef pte_index
1786 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1787
1788 if (addr < vma->vm_start || end_addr >= vma->vm_end)
1789 return -EFAULT;
1790 if (!(vma->vm_flags & VM_MIXEDMAP)) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001791 BUG_ON(mmap_read_trylock(vma->vm_mm));
Arjun Roy8cd39842020-04-10 14:33:01 -07001792 BUG_ON(vma->vm_flags & VM_PFNMAP);
1793 vma->vm_flags |= VM_MIXEDMAP;
1794 }
1795 /* Defer page refcount checking till we're about to map that page. */
1796 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1797#else
1798 unsigned long idx = 0, pgcount = *num;
Tom Rix45779b02020-07-23 21:15:18 -07001799 int err = -EINVAL;
Arjun Roy8cd39842020-04-10 14:33:01 -07001800
1801 for (; idx < pgcount; ++idx) {
1802 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1803 if (err)
1804 break;
1805 }
1806 *num = pgcount - idx;
1807 return err;
1808#endif /* ifdef pte_index */
1809}
1810EXPORT_SYMBOL(vm_insert_pages);
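/*
 * Illustrative sketch (not part of memory.c): mapping a batch of kernel pages
 * with vm_insert_pages() and reporting how many were left unmapped on error.
 * The function name is hypothetical.
 */
#if 0
static int my_drv_map_batch(struct vm_area_struct *vma, struct page **pages,
			    unsigned long nr_pages)
{
	unsigned long remaining = nr_pages;
	int err;

	err = vm_insert_pages(vma, vma->vm_start, pages, &remaining);
	if (err)
		pr_warn("mapped %lu of %lu pages, error %d\n",
			nr_pages - remaining, nr_pages, err);
	return err;
}
#endif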
1811
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07001812/**
1813 * vm_insert_page - insert single page into user vma
1814 * @vma: user vma to map to
1815 * @addr: target user address of this page
1816 * @page: source kernel page
1817 *
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001818 * This allows drivers to insert individual pages they've allocated
1819 * into a user vma.
1820 *
1821 * The page has to be a nice clean _individual_ kernel allocation.
1822 * If you allocate a compound page, you need to have marked it as
1823 * such (__GFP_COMP), or manually just split the page up yourself
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08001824 * (see split_page()).
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001825 *
1826 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1827 * took an arbitrary page protection parameter. This doesn't allow
1828 * that. Your vma protection will have to be set up correctly, which
1829 * means that if you want a shared writable mapping, you'd better
1830 * ask for a shared writable mapping!
1831 *
1832 * The page does not need to be reserved.
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001833 *
1834 * Usually this function is called from f_op->mmap() handler
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001835 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001836 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1837 * function from other places, for example from page-fault handler.
Mike Rapoporta862f682019-03-05 15:48:42 -08001838 *
1839 * Return: %0 on success, negative error code otherwise.
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001840 */
Nick Piggin423bad602008-04-28 02:13:01 -07001841int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1842 struct page *page)
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001843{
1844 if (addr < vma->vm_start || addr >= vma->vm_end)
1845 return -EFAULT;
1846 if (!page_count(page))
1847 return -EINVAL;
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001848 if (!(vma->vm_flags & VM_MIXEDMAP)) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001849 BUG_ON(mmap_read_trylock(vma->vm_mm));
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001850 BUG_ON(vma->vm_flags & VM_PFNMAP);
1851 vma->vm_flags |= VM_MIXEDMAP;
1852 }
Nick Piggin423bad602008-04-28 02:13:01 -07001853 return insert_page(vma, addr, page, vma->vm_page_prot);
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001854}
Linus Torvaldse3c33742005-12-03 20:48:11 -08001855EXPORT_SYMBOL(vm_insert_page);
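/*
 * Illustrative sketch (not part of memory.c): a minimal ->mmap handler that
 * inserts individually allocated kernel pages one by one. struct my_drv_buf,
 * my_drv_mmap and the use of file->private_data are hypothetical.
 */
#if 0
struct my_drv_buf {
	struct page **pages;
	unsigned long nr_pages;
};

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_drv_buf *buf = file->private_data;
	unsigned long uaddr = vma->vm_start;
	unsigned long i;

	if (vma_pages(vma) > buf->nr_pages)
		return -ENXIO;

	for (i = 0; i < vma_pages(vma); i++) {
		int err = vm_insert_page(vma, uaddr, buf->pages[i]);

		if (err)
			return err;
		uaddr += PAGE_SIZE;
	}
	return 0;
}
#endif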
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001856
Souptick Joardera667d742019-05-13 17:21:56 -07001857/*
1858 * __vm_map_pages - maps range of kernel pages into user vma
1859 * @vma: user vma to map to
1860 * @pages: pointer to array of source kernel pages
1861 * @num: number of pages in page array
1862 * @offset: user's requested vm_pgoff
1863 *
1864 * This allows drivers to map range of kernel pages into a user vma.
1865 *
1866 * Return: 0 on success and error code otherwise.
1867 */
1868static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1869 unsigned long num, unsigned long offset)
1870{
1871 unsigned long count = vma_pages(vma);
1872 unsigned long uaddr = vma->vm_start;
1873 int ret, i;
1874
1875 /* Fail if the user requested offset is beyond the end of the object */
Miguel Ojeda96756fc2019-07-11 20:58:47 -07001876 if (offset >= num)
Souptick Joardera667d742019-05-13 17:21:56 -07001877 return -ENXIO;
1878
1879 /* Fail if the user requested size exceeds available object size */
1880 if (count > num - offset)
1881 return -ENXIO;
1882
1883 for (i = 0; i < count; i++) {
1884 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1885 if (ret < 0)
1886 return ret;
1887 uaddr += PAGE_SIZE;
1888 }
1889
1890 return 0;
1891}
1892
1893/**
1894 * vm_map_pages - maps range of kernel pages starts with non zero offset
1895 * @vma: user vma to map to
1896 * @pages: pointer to array of source kernel pages
1897 * @num: number of pages in page array
1898 *
1899 * Maps an object consisting of @num pages, catering for the user's
1900 * requested vm_pgoff
1901 *
1902 * If we fail to insert any page into the vma, the function will return
1903 * immediately leaving any previously inserted pages present. Callers
1904 * from the mmap handler may immediately return the error as their caller
1905 * will destroy the vma, removing any successfully inserted pages. Other
1906 * callers should make their own arrangements for calling unmap_region().
1907 *
1908 * Context: Process context. Called by mmap handlers.
1909 * Return: 0 on success and error code otherwise.
1910 */
1911int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1912 unsigned long num)
1913{
1914 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
1915}
1916EXPORT_SYMBOL(vm_map_pages);
1917
1918/**
1919 * vm_map_pages_zero - map range of kernel pages starts with zero offset
1920 * @vma: user vma to map to
1921 * @pages: pointer to array of source kernel pages
1922 * @num: number of pages in page array
1923 *
1924 * Similar to vm_map_pages(), except that it explicitly sets the offset
1925 * to 0. This function is intended for the drivers that did not consider
1926 * vm_pgoff.
1927 *
1928 * Context: Process context. Called by mmap handlers.
1929 * Return: 0 on success and error code otherwise.
1930 */
1931int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
1932 unsigned long num)
1933{
1934 return __vm_map_pages(vma, pages, num, 0);
1935}
1936EXPORT_SYMBOL(vm_map_pages_zero);
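/*
 * Illustrative sketch (not part of memory.c): vm_map_pages() lets an mmap
 * handler map a whole page array in one call, honouring the user's vm_pgoff
 * and rejecting out-of-range requests with -ENXIO. struct my_drv_buf and
 * my_drv_mmap are hypothetical, as in the vm_insert_page() sketch above.
 */
#if 0
static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_drv_buf *buf = file->private_data;

	return vm_map_pages(vma, buf->pages, buf->nr_pages);
}
#endif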
1937
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001938static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
Ross Zwislerb2770da62017-09-06 16:18:35 -07001939 pfn_t pfn, pgprot_t prot, bool mkwrite)
Nick Piggin423bad602008-04-28 02:13:01 -07001940{
1941 struct mm_struct *mm = vma->vm_mm;
Nick Piggin423bad602008-04-28 02:13:01 -07001942 pte_t *pte, entry;
1943 spinlock_t *ptl;
1944
Nick Piggin423bad602008-04-28 02:13:01 -07001945 pte = get_locked_pte(mm, addr, &ptl);
1946 if (!pte)
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001947 return VM_FAULT_OOM;
Ross Zwislerb2770da62017-09-06 16:18:35 -07001948 if (!pte_none(*pte)) {
1949 if (mkwrite) {
1950 /*
1951 * For read faults on private mappings the PFN passed
1952 * in may not match the PFN we have mapped if the
1953 * mapped PFN is a writeable COW page. In the mkwrite
1954 * case we are creating a writable PTE for a shared
Jan Karaf2c57d92018-10-30 15:10:47 -07001955 * mapping and we expect the PFNs to match. If they
1956 * don't match, we are likely racing with block
1957 * allocation and mapping invalidation so just skip the
1958 * update.
Ross Zwislerb2770da62017-09-06 16:18:35 -07001959 */
Jan Karaf2c57d92018-10-30 15:10:47 -07001960 if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
1961 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
Ross Zwislerb2770da62017-09-06 16:18:35 -07001962 goto out_unlock;
Jan Karaf2c57d92018-10-30 15:10:47 -07001963 }
Jan Karacae85cb2019-03-28 20:43:19 -07001964 entry = pte_mkyoung(*pte);
Laurent Dufour32507b62018-04-17 16:33:18 +02001965 entry = maybe_mkwrite(pte_mkdirty(entry),
1966 vma->vm_flags);
Jan Karacae85cb2019-03-28 20:43:19 -07001967 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
1968 update_mmu_cache(vma, addr, pte);
1969 }
1970 goto out_unlock;
Ross Zwislerb2770da62017-09-06 16:18:35 -07001971 }
Nick Piggin423bad602008-04-28 02:13:01 -07001972
1973 /* Ok, finally just insert the thing.. */
Dan Williams01c8f1c2016-01-15 16:56:40 -08001974 if (pfn_t_devmap(pfn))
1975 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1976 else
1977 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
Ross Zwislerb2770da62017-09-06 16:18:35 -07001978
Ross Zwislerb2770da62017-09-06 16:18:35 -07001979 if (mkwrite) {
1980 entry = pte_mkyoung(entry);
Laurent Dufour32507b62018-04-17 16:33:18 +02001981 entry = maybe_mkwrite(pte_mkdirty(entry), vma->vm_flags);
Ross Zwislerb2770da62017-09-06 16:18:35 -07001982 }
1983
Nick Piggin423bad602008-04-28 02:13:01 -07001984 set_pte_at(mm, addr, pte, entry);
Russell King4b3073e2009-12-18 16:40:18 +00001985 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
Nick Piggin423bad602008-04-28 02:13:01 -07001986
Nick Piggin423bad602008-04-28 02:13:01 -07001987out_unlock:
1988 pte_unmap_unlock(pte, ptl);
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001989 return VM_FAULT_NOPAGE;
Nick Piggin423bad602008-04-28 02:13:01 -07001990}
1991
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001992/**
1993 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1994 * @vma: user vma to map to
1995 * @addr: target user address of this page
1996 * @pfn: source kernel pfn
1997 * @pgprot: pgprot flags for the inserted page
1998 *
Randy Dunlapa1a0aea2020-08-11 18:33:05 -07001999 * This is exactly like vmf_insert_pfn(), except that it allows drivers
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07002000 * to override pgprot on a per-page basis.
2001 *
2002 * This only makes sense for IO mappings, and it makes no sense for
2003 * COW mappings. In general, using multiple vmas is preferable;
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07002004 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07002005 * impractical.
2006 *
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002007 * See vmf_insert_mixed_prot() for a discussion of the implication of using
2008 * a value of @pgprot different from that of @vma->vm_page_prot.
2009 *
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07002010 * Context: Process context. May allocate using %GFP_KERNEL.
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07002011 * Return: vm_fault_t value.
2012 */
2013vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2014 unsigned long pfn, pgprot_t pgprot)
2015{
Matthew Wilcox6d958542018-10-26 15:04:33 -07002016 /*
2017 * Technically, architectures with pte_special can avoid all these
2018 * restrictions (same for remap_pfn_range). However we would like
2019 * consistency in testing and feature parity among all, so we should
2020 * try to keep these invariants in place for everybody.
2021 */
2022 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2023 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2024 (VM_PFNMAP|VM_MIXEDMAP));
2025 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2026 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2027
2028 if (addr < vma->vm_start || addr >= vma->vm_end)
2029 return VM_FAULT_SIGBUS;
2030
2031 if (!pfn_modify_allowed(pfn, pgprot))
2032 return VM_FAULT_SIGBUS;
2033
2034 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2035
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07002036 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
Matthew Wilcox6d958542018-10-26 15:04:33 -07002037 false);
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07002038}
2039EXPORT_SYMBOL(vmf_insert_pfn_prot);
Nick Piggine0dc0d82007-02-12 00:51:36 -08002040
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07002041/**
2042 * vmf_insert_pfn - insert single pfn into user vma
2043 * @vma: user vma to map to
2044 * @addr: target user address of this page
2045 * @pfn: source kernel pfn
2046 *
2047 * Similar to vm_insert_page, this allows drivers to insert individual pages
2048 * they've allocated into a user vma. Same comments apply.
2049 *
2050 * This function should only be called from a vm_ops->fault handler, and
2051 * in that case the handler should return the result of this function.
2052 *
2053 * vma cannot be a COW mapping.
2054 *
2055 * As this is called only for pages that do not currently exist, we
2056 * do not need to flush old virtual caches or the TLB.
2057 *
2058 * Context: Process context. May allocate using %GFP_KERNEL.
2059 * Return: vm_fault_t value.
2060 */
2061vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2062 unsigned long pfn)
2063{
2064 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2065}
2066EXPORT_SYMBOL(vmf_insert_pfn);
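/*
 * Illustrative sketch (not part of memory.c): a vm_ops->fault handler that
 * maps device memory on demand with vmf_insert_pfn(). struct my_drv_dev,
 * phys_base and the vm_private_data usage are hypothetical; the vma is
 * assumed to have been set up as VM_PFNMAP at mmap time.
 */
#if 0
struct my_drv_dev {
	phys_addr_t phys_base;
};

static vm_fault_t my_drv_fault(struct vm_fault *vmf)
{
	struct my_drv_dev *dev = vmf->vma->vm_private_data;
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vmf->pgoff;

	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
#endif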
2067
Dan Williams785a3fa2017-10-23 07:20:00 -07002068static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2069{
2070 /* these checks mirror the abort conditions in vm_normal_page */
2071 if (vma->vm_flags & VM_MIXEDMAP)
2072 return true;
2073 if (pfn_t_devmap(pfn))
2074 return true;
2075 if (pfn_t_special(pfn))
2076 return true;
2077 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2078 return true;
2079 return false;
2080}
2081
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002082static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002083 unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2084 bool mkwrite)
Nick Piggin423bad602008-04-28 02:13:01 -07002085{
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002086 int err;
Dan Williams87744ab2016-10-07 17:00:18 -07002087
Dan Williams785a3fa2017-10-23 07:20:00 -07002088 BUG_ON(!vm_mixed_ok(vma, pfn));
Nick Piggin423bad602008-04-28 02:13:01 -07002089
2090 if (addr < vma->vm_start || addr >= vma->vm_end)
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002091 return VM_FAULT_SIGBUS;
Borislav Petkov308a0472016-10-26 19:43:43 +02002092
2093 track_pfn_insert(vma, &pgprot, pfn);
Nick Piggin423bad602008-04-28 02:13:01 -07002094
Andi Kleen42e40892018-06-13 15:48:27 -07002095 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002096 return VM_FAULT_SIGBUS;
Andi Kleen42e40892018-06-13 15:48:27 -07002097
Nick Piggin423bad602008-04-28 02:13:01 -07002098 /*
2099 * If we don't have pte special, then we have to use the pfn_valid()
2100 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2101 * refcount the page if pfn_valid is true (hence insert_page rather
Hugh Dickins62eede62009-09-21 17:03:34 -07002102 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 2103	 * without pte special, it would then be refcounted here as a normal page.
Nick Piggin423bad602008-04-28 02:13:01 -07002104 */
Laurent Dufour00b3a332018-06-07 17:06:12 -07002105 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2106 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
Nick Piggin423bad602008-04-28 02:13:01 -07002107 struct page *page;
2108
Dan Williams03fc2da2016-01-26 09:48:05 -08002109 /*
2110 * At this point we are committed to insert_page()
2111 * regardless of whether the caller specified flags that
2112 * result in pfn_t_has_page() == false.
2113 */
2114 page = pfn_to_page(pfn_t_to_pfn(pfn));
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002115 err = insert_page(vma, addr, page, pgprot);
2116 } else {
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07002117 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
Nick Piggin423bad602008-04-28 02:13:01 -07002118 }
Ross Zwislerb2770da62017-09-06 16:18:35 -07002119
Matthew Wilcox5d747632018-10-26 15:04:10 -07002120 if (err == -ENOMEM)
2121 return VM_FAULT_OOM;
2122 if (err < 0 && err != -EBUSY)
2123 return VM_FAULT_SIGBUS;
2124
2125 return VM_FAULT_NOPAGE;
Nick Piggin423bad602008-04-28 02:13:01 -07002126}
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002127
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002128/**
2129 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2130 * @vma: user vma to map to
2131 * @addr: target user address of this page
2132 * @pfn: source kernel pfn
2133 * @pgprot: pgprot flags for the inserted page
2134 *
Randy Dunlapa1a0aea2020-08-11 18:33:05 -07002135 * This is exactly like vmf_insert_mixed(), except that it allows drivers
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002136 * to override pgprot on a per-page basis.
2137 *
2138 * Typically this function should be used by drivers to set caching- and
2139 * encryption bits different than those of @vma->vm_page_prot, because
2140 * the caching- or encryption mode may not be known at mmap() time.
2141 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2142 * to set caching and encryption bits for those vmas (except for COW pages).
2143 * This is ensured by core vm only modifying these page table entries using
2144 * functions that don't touch caching- or encryption bits, using pte_modify()
2145 * if needed. (See for example mprotect()).
2146 * Also when new page-table entries are created, this is only done using the
2147 * fault() callback, and never using the value of vma->vm_page_prot,
2148 * except for page-table entries that point to anonymous pages as the result
2149 * of COW.
2150 *
2151 * Context: Process context. May allocate using %GFP_KERNEL.
2152 * Return: vm_fault_t value.
2153 */
2154vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2155 pfn_t pfn, pgprot_t pgprot)
2156{
2157 return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2158}
Thomas Hellstrom5379e4d2019-11-22 09:34:35 +01002159EXPORT_SYMBOL(vmf_insert_mixed_prot);
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002160
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002161vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2162 pfn_t pfn)
2163{
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002164 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002165}
Matthew Wilcox5d747632018-10-26 15:04:10 -07002166EXPORT_SYMBOL(vmf_insert_mixed);
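/*
 * Illustrative sketch (not part of memory.c): a fault handler for a
 * VM_MIXEDMAP mapping, using the pfn_t helpers already used above. The
 * my_drv_pfn_for() helper and the PFN computation are hypothetical.
 */
#if 0
static vm_fault_t my_mixed_fault(struct vm_fault *vmf)
{
	unsigned long pfn = my_drv_pfn_for(vmf->vma, vmf->pgoff);	/* hypothetical */

	return vmf_insert_mixed(vmf->vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
}
#endif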
Nick Piggin423bad602008-04-28 02:13:01 -07002167
Souptick Joarderab77dab2018-06-07 17:04:29 -07002168/*
2169 * If the insertion of PTE failed because someone else already added a
2170 * different entry in the mean time, we treat that as success as we assume
2171 * the same entry was actually inserted.
2172 */
Souptick Joarderab77dab2018-06-07 17:04:29 -07002173vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2174 unsigned long addr, pfn_t pfn)
Ross Zwislerb2770da62017-09-06 16:18:35 -07002175{
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002176 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
Ross Zwislerb2770da62017-09-06 16:18:35 -07002177}
Souptick Joarderab77dab2018-06-07 17:04:29 -07002178EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
Ross Zwislerb2770da62017-09-06 16:18:35 -07002179
Linus Torvaldsa145dd42005-11-30 09:35:19 -08002180/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 * maps a range of physical memory into the requested pages. the old
 2182 * mappings are removed. any references to nonexistent pages result
2183 * in null mappings (currently treated as "copy-on-access")
2184 */
2185static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2186 unsigned long addr, unsigned long end,
2187 unsigned long pfn, pgprot_t prot)
2188{
2189 pte_t *pte;
Hugh Dickinsc74df322005-10-29 18:16:23 -07002190 spinlock_t *ptl;
Andi Kleen42e40892018-06-13 15:48:27 -07002191 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
Hugh Dickinsc74df322005-10-29 18:16:23 -07002193 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 if (!pte)
2195 return -ENOMEM;
Zachary Amsden6606c3e2006-09-30 23:29:33 -07002196 arch_enter_lazy_mmu_mode();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 do {
2198 BUG_ON(!pte_none(*pte));
Andi Kleen42e40892018-06-13 15:48:27 -07002199 if (!pfn_modify_allowed(pfn, prot)) {
2200 err = -EACCES;
2201 break;
2202 }
Nick Piggin7e675132008-04-28 02:13:00 -07002203 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 pfn++;
2205 } while (pte++, addr += PAGE_SIZE, addr != end);
Zachary Amsden6606c3e2006-09-30 23:29:33 -07002206 arch_leave_lazy_mmu_mode();
Hugh Dickinsc74df322005-10-29 18:16:23 -07002207 pte_unmap_unlock(pte - 1, ptl);
Andi Kleen42e40892018-06-13 15:48:27 -07002208 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209}
2210
2211static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2212 unsigned long addr, unsigned long end,
2213 unsigned long pfn, pgprot_t prot)
2214{
2215 pmd_t *pmd;
2216 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002217 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
2219 pfn -= addr >> PAGE_SHIFT;
2220 pmd = pmd_alloc(mm, pud, addr);
2221 if (!pmd)
2222 return -ENOMEM;
Andrea Arcangelif66055ab2011-01-13 15:46:54 -08002223 VM_BUG_ON(pmd_trans_huge(*pmd));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 do {
2225 next = pmd_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002226 err = remap_pte_range(mm, pmd, addr, next,
2227 pfn + (addr >> PAGE_SHIFT), prot);
2228 if (err)
2229 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 } while (pmd++, addr = next, addr != end);
2231 return 0;
2232}
2233
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002234static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 unsigned long addr, unsigned long end,
2236 unsigned long pfn, pgprot_t prot)
2237{
2238 pud_t *pud;
2239 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002240 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241
2242 pfn -= addr >> PAGE_SHIFT;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002243 pud = pud_alloc(mm, p4d, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 if (!pud)
2245 return -ENOMEM;
2246 do {
2247 next = pud_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002248 err = remap_pmd_range(mm, pud, addr, next,
2249 pfn + (addr >> PAGE_SHIFT), prot);
2250 if (err)
2251 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 } while (pud++, addr = next, addr != end);
2253 return 0;
2254}
2255
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002256static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2257 unsigned long addr, unsigned long end,
2258 unsigned long pfn, pgprot_t prot)
2259{
2260 p4d_t *p4d;
2261 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002262 int err;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002263
2264 pfn -= addr >> PAGE_SHIFT;
2265 p4d = p4d_alloc(mm, pgd, addr);
2266 if (!p4d)
2267 return -ENOMEM;
2268 do {
2269 next = p4d_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002270 err = remap_pud_range(mm, p4d, addr, next,
2271 pfn + (addr >> PAGE_SHIFT), prot);
2272 if (err)
2273 return err;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002274 } while (p4d++, addr = next, addr != end);
2275 return 0;
2276}
2277
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002278/**
2279 * remap_pfn_range - remap kernel memory to userspace
2280 * @vma: user vma to map to
Alex Zhang0c4123e2020-08-06 23:22:24 -07002281 * @addr: target page aligned user address to start at
WANG Wenhu86a76332020-04-01 21:09:03 -07002282 * @pfn: page frame number of kernel physical memory address
chenqiwu552657b2020-04-06 20:08:33 -07002283 * @size: size of mapping area
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002284 * @prot: page protection flags for this mapping
2285 *
Mike Rapoporta862f682019-03-05 15:48:42 -08002286 * Note: this is only safe if the mm semaphore is held when called.
2287 *
2288 * Return: %0 on success, negative error code otherwise.
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002289 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2291 unsigned long pfn, unsigned long size, pgprot_t prot)
2292{
2293 pgd_t *pgd;
2294 unsigned long next;
Hugh Dickins2d15cab2005-06-25 14:54:33 -07002295 unsigned long end = addr + PAGE_ALIGN(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 struct mm_struct *mm = vma->vm_mm;
Yongji Xied5957d22016-05-20 16:57:41 -07002297 unsigned long remap_pfn = pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 int err;
2299
Alex Zhang0c4123e2020-08-06 23:22:24 -07002300 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2301 return -EINVAL;
2302
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 /*
2304 * Physically remapped pages are special. Tell the
2305 * rest of the world about it:
2306 * VM_IO tells people not to look at these pages
2307 * (accesses can have side effects).
Linus Torvalds6aab3412005-11-28 14:34:23 -08002308 * VM_PFNMAP tells the core MM that the base pages are just
2309 * raw PFN mappings, and do not have a "struct page" associated
2310 * with them.
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002311 * VM_DONTEXPAND
2312 * Disable vma merging and expanding with mremap().
2313 * VM_DONTDUMP
2314 * Omit vma from core dump, even when VM_IO turned off.
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002315 *
2316 * There's a horrible special case to handle copy-on-write
2317 * behaviour that some programs depend on. We mark the "original"
2318 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002319 * See vm_normal_page() for details.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 */
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002321 if (is_cow_mapping(vma->vm_flags)) {
2322 if (addr != vma->vm_start || end != vma->vm_end)
2323 return -EINVAL;
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002324 vma->vm_pgoff = pfn;
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002325 }
2326
Yongji Xied5957d22016-05-20 16:57:41 -07002327 err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002328 if (err)
venkatesh.pallipadi@intel.com3c8bb732008-12-18 11:41:27 -08002329 return -EINVAL;
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002330
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002331 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
2333 BUG_ON(addr >= end);
2334 pfn -= addr >> PAGE_SHIFT;
2335 pgd = pgd_offset(mm, addr);
2336 flush_cache_range(vma, addr, end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 do {
2338 next = pgd_addr_end(addr, end);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002339 err = remap_p4d_range(mm, pgd, addr, next,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 pfn + (addr >> PAGE_SHIFT), prot);
2341 if (err)
2342 break;
2343 } while (pgd++, addr = next, addr != end);
venkatesh.pallipadi@intel.com2ab64032008-12-18 11:41:29 -08002344
2345 if (err)
Yongji Xied5957d22016-05-20 16:57:41 -07002346 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
venkatesh.pallipadi@intel.com2ab64032008-12-18 11:41:29 -08002347
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 return err;
2349}
2350EXPORT_SYMBOL(remap_pfn_range);
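/*
 * Illustrative sketch (not part of memory.c): the classic ->mmap handler for
 * a device exposing a contiguous physical region. MY_DRV_PHYS_BASE and
 * my_drv_mmap are hypothetical; vm_pgoff is added to the base pfn so the
 * user-requested offset is respected.
 */
#if 0
static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (MY_DRV_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
#endif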
2351
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002352/**
2353 * vm_iomap_memory - remap memory to userspace
2354 * @vma: user vma to map to
Wang Wenhuabd69b92020-04-01 21:09:07 -07002355 * @start: start of the physical memory to be mapped
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002356 * @len: size of area
2357 *
2358 * This is a simplified io_remap_pfn_range() for common driver use. The
2359 * driver just needs to give us the physical memory range to be mapped,
2360 * we'll figure out the rest from the vma information.
2361 *
 2362 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set up
 2363 * write-combining or similar caching attributes.
Mike Rapoporta862f682019-03-05 15:48:42 -08002364 *
2365 * Return: %0 on success, negative error code otherwise.
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002366 */
2367int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2368{
2369 unsigned long vm_len, pfn, pages;
2370
2371 /* Check that the physical memory area passed in looks valid */
2372 if (start + len < start)
2373 return -EINVAL;
2374 /*
2375 * You *really* shouldn't map things that aren't page-aligned,
2376 * but we've historically allowed it because IO memory might
2377 * just have smaller alignment.
2378 */
2379 len += start & ~PAGE_MASK;
2380 pfn = start >> PAGE_SHIFT;
2381 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2382 if (pfn + pages < pfn)
2383 return -EINVAL;
2384
2385 /* We start the mapping 'vm_pgoff' pages into the area */
2386 if (vma->vm_pgoff > pages)
2387 return -EINVAL;
2388 pfn += vma->vm_pgoff;
2389 pages -= vma->vm_pgoff;
2390
2391 /* Can we fit all of the mapping? */
2392 vm_len = vma->vm_end - vma->vm_start;
2393 if (vm_len >> PAGE_SHIFT > pages)
2394 return -EINVAL;
2395
2396 /* Ok, let it rip */
2397 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2398}
2399EXPORT_SYMBOL(vm_iomap_memory);
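/*
 * Illustrative sketch, not part of this file: with vm_iomap_memory() a
 * driver only hands over the raw physical range and the helper above
 * works out the pfn, offset and length checks from the vma.  The caching
 * tweak follows the NOTE above; FOO_PHYS_BASE and FOO_PHYS_LEN are
 * hypothetical placeholders for the device's resource.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, FOO_PHYS_BASE, FOO_PHYS_LEN);
 *	}
 */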
2400
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002401static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2402 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002403 pte_fn_t fn, void *data, bool create,
2404 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002405{
2406 pte_t *pte;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002407 int err = 0;
Kees Cook3f649ab2020-06-03 13:09:38 -07002408 spinlock_t *ptl;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002409
Daniel Axtensbe1db472019-12-17 20:51:41 -08002410 if (create) {
2411 pte = (mm == &init_mm) ?
Joerg Roedele80d3902020-09-04 16:35:43 -07002412 pte_alloc_kernel_track(pmd, addr, mask) :
Daniel Axtensbe1db472019-12-17 20:51:41 -08002413 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2414 if (!pte)
2415 return -ENOMEM;
2416 } else {
2417 pte = (mm == &init_mm) ?
2418 pte_offset_kernel(pmd, addr) :
2419 pte_offset_map_lock(mm, pmd, addr, &ptl);
2420 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002421
2422 BUG_ON(pmd_huge(*pmd));
2423
Jeremy Fitzhardinge38e0edb2009-01-06 14:39:21 -08002424 arch_enter_lazy_mmu_mode();
2425
Christoph Hellwigeeb4a052020-10-17 16:15:14 -07002426 if (fn) {
2427 do {
2428 if (create || !pte_none(*pte)) {
2429 err = fn(pte++, addr, data);
2430 if (err)
2431 break;
2432 }
2433 } while (addr += PAGE_SIZE, addr != end);
2434 }
Joerg Roedele80d3902020-09-04 16:35:43 -07002435 *mask |= PGTBL_PTE_MODIFIED;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002436
Jeremy Fitzhardinge38e0edb2009-01-06 14:39:21 -08002437 arch_leave_lazy_mmu_mode();
2438
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002439 if (mm != &init_mm)
2440 pte_unmap_unlock(pte-1, ptl);
2441 return err;
2442}
2443
2444static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2445 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002446 pte_fn_t fn, void *data, bool create,
2447 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002448{
2449 pmd_t *pmd;
2450 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002451 int err = 0;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002452
Andi Kleenceb86872008-07-23 21:27:50 -07002453 BUG_ON(pud_huge(*pud));
2454
Daniel Axtensbe1db472019-12-17 20:51:41 -08002455 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002456 pmd = pmd_alloc_track(mm, pud, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002457 if (!pmd)
2458 return -ENOMEM;
2459 } else {
2460 pmd = pmd_offset(pud, addr);
2461 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002462 do {
2463 next = pmd_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002464 if (create || !pmd_none_or_clear_bad(pmd)) {
2465 err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002466 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002467 if (err)
2468 break;
2469 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002470 } while (pmd++, addr = next, addr != end);
2471 return err;
2472}
2473
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002474static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002475 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002476 pte_fn_t fn, void *data, bool create,
2477 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002478{
2479 pud_t *pud;
2480 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002481 int err = 0;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002482
Daniel Axtensbe1db472019-12-17 20:51:41 -08002483 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002484 pud = pud_alloc_track(mm, p4d, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002485 if (!pud)
2486 return -ENOMEM;
2487 } else {
2488 pud = pud_offset(p4d, addr);
2489 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002490 do {
2491 next = pud_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002492 if (create || !pud_none_or_clear_bad(pud)) {
2493 err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002494 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002495 if (err)
2496 break;
2497 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002498 } while (pud++, addr = next, addr != end);
2499 return err;
2500}
2501
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002502static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2503 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002504 pte_fn_t fn, void *data, bool create,
2505 pgtbl_mod_mask *mask)
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002506{
2507 p4d_t *p4d;
2508 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002509 int err = 0;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002510
Daniel Axtensbe1db472019-12-17 20:51:41 -08002511 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002512 p4d = p4d_alloc_track(mm, pgd, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002513 if (!p4d)
2514 return -ENOMEM;
2515 } else {
2516 p4d = p4d_offset(pgd, addr);
2517 }
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002518 do {
2519 next = p4d_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002520 if (create || !p4d_none_or_clear_bad(p4d)) {
2521 err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002522 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002523 if (err)
2524 break;
2525 }
2526 } while (p4d++, addr = next, addr != end);
2527 return err;
2528}
2529
2530static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2531 unsigned long size, pte_fn_t fn,
2532 void *data, bool create)
2533{
2534 pgd_t *pgd;
Joerg Roedele80d3902020-09-04 16:35:43 -07002535 unsigned long start = addr, next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002536 unsigned long end = addr + size;
Joerg Roedele80d3902020-09-04 16:35:43 -07002537 pgtbl_mod_mask mask = 0;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002538 int err = 0;
2539
2540 if (WARN_ON(addr >= end))
2541 return -EINVAL;
2542
2543 pgd = pgd_offset(mm, addr);
2544 do {
2545 next = pgd_addr_end(addr, end);
2546 if (!create && pgd_none_or_clear_bad(pgd))
2547 continue;
Joerg Roedele80d3902020-09-04 16:35:43 -07002548 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002549 if (err)
2550 break;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002551 } while (pgd++, addr = next, addr != end);
2552
Joerg Roedele80d3902020-09-04 16:35:43 -07002553 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2554 arch_sync_kernel_mappings(start, start + size);
2555
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002556 return err;
2557}
2558
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002559/*
2560 * Scan a region of virtual memory, filling in page tables as necessary
2561 * and calling a provided function on each leaf page table.
2562 */
2563int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2564 unsigned long size, pte_fn_t fn, void *data)
2565{
Daniel Axtensbe1db472019-12-17 20:51:41 -08002566 return __apply_to_page_range(mm, addr, size, fn, data, true);
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002567}
2568EXPORT_SYMBOL_GPL(apply_to_page_range);
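/*
 * Illustrative sketch, not part of this file: a pte_fn_t callback that
 * counts present PTEs over a kernel virtual range (a non-zero return
 * from the callback aborts the walk).  The helper names
 * count_present_pte() and count_present() are made up for the example.
 *
 *	static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	static unsigned long count_present(unsigned long addr, unsigned long size)
 *	{
 *		unsigned long count = 0;
 *
 *		apply_to_page_range(&init_mm, addr, size,
 *				    count_present_pte, &count);
 *		return count;
 *	}
 *
 * Note that this variant allocates any missing page tables just to visit
 * empty PTEs; apply_to_existing_page_range() below avoids that.
 */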
2569
Laurent Dufourb23ffc12018-04-17 16:33:11 +02002570static inline bool pte_spinlock(struct vm_fault *vmf)
2571{
2572 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2573 spin_lock(vmf->ptl);
2574 return true;
2575}
2576
Peter Zijlstraafeec972018-04-17 16:33:10 +02002577static inline bool pte_map_lock(struct vm_fault *vmf)
2578{
2579 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
2580 vmf->address, &vmf->ptl);
2581 return true;
2582}
2583
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584/*
Daniel Axtensbe1db472019-12-17 20:51:41 -08002585 * Scan a region of virtual memory, calling a provided function on
2586 * each leaf page table where it exists.
2587 *
2588 * Unlike apply_to_page_range, this does _not_ fill in page tables
2589 * where they are absent.
2590 */
2591int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2592 unsigned long size, pte_fn_t fn, void *data)
2593{
2594 return __apply_to_page_range(mm, addr, size, fn, data, false);
2595}
2596EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
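/*
 * Continuing the sketch above (hypothetical helpers): on a sparse range
 * the non-allocating variant only visits PTEs whose page tables already
 * exist, so nothing is allocated as a side effect of the scan:
 *
 *	apply_to_existing_page_range(&init_mm, addr, size,
 *				     count_present_pte, &count);
 */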
2597
2598/*
Kirill A. Shutemov9b4bdd22015-02-10 14:09:51 -08002599 * handle_pte_fault chooses page fault handler according to an entry which was
2600 * read non-atomically. Before making any commitment, on those architectures
2601 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2602 * parts, do_swap_page must check under lock before unmapping the pte and
2603 * proceeding (but do_wp_page is only called after already making such a check;
Ryota Ozakia335b2e2011-02-10 13:56:28 +09002604 * and do_anonymous_page can safely check later on).
Laurent Dufour5835d872018-04-17 16:33:12 +02002605 *
2606 * pte_unmap_same() returns:
2607 * 0 if the PTEs are the same
2608 * VM_FAULT_PTNOTSAME if the PTEs are different
2609 * VM_FAULT_RETRY if the VMA has changed behind our back during
2610 * speculative page fault handling.
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002611 */
Laurent Dufour5835d872018-04-17 16:33:12 +02002612static inline int pte_unmap_same(struct vm_fault *vmf)
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002613{
Laurent Dufour5835d872018-04-17 16:33:12 +02002614 int ret = 0;
2615
2616#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002617 if (sizeof(pte_t) > sizeof(unsigned long)) {
Laurent Dufour5835d872018-04-17 16:33:12 +02002618 if (pte_spinlock(vmf)) {
2619 if (!pte_same(*vmf->pte, vmf->orig_pte))
2620 ret = VM_FAULT_PTNOTSAME;
2621 spin_unlock(vmf->ptl);
2622 } else
2623 ret = VM_FAULT_RETRY;
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002624 }
2625#endif
Laurent Dufour5835d872018-04-17 16:33:12 +02002626 pte_unmap(vmf->pte);
2627 return ret;
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002628}
2629
Jia He83d116c2019-10-11 22:09:39 +08002630static inline bool cow_user_page(struct page *dst, struct page *src,
2631 struct vm_fault *vmf)
Linus Torvalds6aab3412005-11-28 14:34:23 -08002632{
Jia He83d116c2019-10-11 22:09:39 +08002633 bool ret;
2634 void *kaddr;
2635 void __user *uaddr;
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002636 bool locked = false;
Jia He83d116c2019-10-11 22:09:39 +08002637 struct vm_area_struct *vma = vmf->vma;
2638 struct mm_struct *mm = vma->vm_mm;
2639 unsigned long addr = vmf->address;
2640
Jia He83d116c2019-10-11 22:09:39 +08002641 if (likely(src)) {
2642 copy_user_highpage(dst, src, addr, vma);
2643 return true;
2644 }
2645
Linus Torvalds6aab3412005-11-28 14:34:23 -08002646 /*
2647 * If the source page was a PFN mapping, we don't have
2648 * a "struct page" for it. We do a best-effort copy by
2649 * just copying from the original user address. If that
2650 * fails, we just zero-fill it. Live with it.
2651 */
Jia He83d116c2019-10-11 22:09:39 +08002652 kaddr = kmap_atomic(dst);
2653 uaddr = (void __user *)(addr & PAGE_MASK);
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002654
Jia He83d116c2019-10-11 22:09:39 +08002655 /*
2656 * On architectures with software "accessed" bits, we would
2657 * take a double page fault, so mark it accessed here.
2658 */
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002659 if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
Jia He83d116c2019-10-11 22:09:39 +08002660 pte_t entry;
2661
2662 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002663 locked = true;
Jia He83d116c2019-10-11 22:09:39 +08002664 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2665 /*
2666 * Another thread has already handled the fault;
Bibo Mao7df67692020-05-27 10:25:18 +08002667 * just update the local TLB and bail out.
Jia He83d116c2019-10-11 22:09:39 +08002668 */
Bibo Mao7df67692020-05-27 10:25:18 +08002669 update_mmu_tlb(vma, addr, vmf->pte);
Jia He83d116c2019-10-11 22:09:39 +08002670 ret = false;
2671 goto pte_unlock;
2672 }
2673
2674 entry = pte_mkyoung(vmf->orig_pte);
2675 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2676 update_mmu_cache(vma, addr, vmf->pte);
2677 }
2678
2679 /*
2680 * This really shouldn't fail, because the page is there
2681 * in the page tables. But it might just be unreadable,
2682 * in which case we just give up and fill the result with
2683 * zeroes.
2684 */
2685 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002686 if (locked)
2687 goto warn;
2688
2689 /* Re-validate under PTL if the page is still mapped */
2690 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2691 locked = true;
2692 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
Bibo Mao7df67692020-05-27 10:25:18 +08002693 /* The PTE changed under us, update local tlb */
2694 update_mmu_tlb(vma, addr, vmf->pte);
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002695 ret = false;
2696 goto pte_unlock;
2697 }
2698
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002699 /*
Ethon Paul985ba002020-06-04 16:49:43 -07002700 * The same page may have been mapped back since the last copy attempt.
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002701 * Try to copy again under PTL.
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002702 */
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002703 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2704 /*
2705 * Warn in case some obscure use-case
2706 * actually hits this path.
2707 */
2708warn:
2709 WARN_ON_ONCE(1);
2710 clear_page(kaddr);
2711 }
Jia He83d116c2019-10-11 22:09:39 +08002712 }
2713
2714 ret = true;
2715
2716pte_unlock:
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002717 if (locked)
Jia He83d116c2019-10-11 22:09:39 +08002718 pte_unmap_unlock(vmf->pte, vmf->ptl);
2719 kunmap_atomic(kaddr);
2720 flush_dcache_page(dst);
2721
2722 return ret;
Linus Torvalds6aab3412005-11-28 14:34:23 -08002723}
2724
Michal Hockoc20cd452016-01-14 15:20:12 -08002725static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2726{
2727 struct file *vm_file = vma->vm_file;
2728
2729 if (vm_file)
2730 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2731
2732 /*
2733 * Special mappings (e.g. VDSO) do not have any file so fake
2734 * a default GFP_KERNEL for them.
2735 */
2736 return GFP_KERNEL;
2737}
2738
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739/*
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002740 * Notify the address space that the page is about to become writable so that
2741 * it can prohibit this or wait for the page to get into an appropriate state.
2742 *
2743 * We do this without the lock held, so that it can sleep if it needs to.
2744 */
Souptick Joarder2b740302018-08-23 17:01:36 -07002745static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002746{
Souptick Joarder2b740302018-08-23 17:01:36 -07002747 vm_fault_t ret;
Jan Kara38b8cb72016-12-14 15:07:30 -08002748 struct page *page = vmf->page;
2749 unsigned int old_flags = vmf->flags;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002750
Jan Kara38b8cb72016-12-14 15:07:30 -08002751 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002752
Darrick J. Wongdc617f22019-08-20 07:55:16 -07002753 if (vmf->vma->vm_file &&
2754 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2755 return VM_FAULT_SIGBUS;
2756
Dave Jiang11bac802017-02-24 14:56:41 -08002757 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
Jan Kara38b8cb72016-12-14 15:07:30 -08002758 /* Restore original flags so that caller is not surprised */
2759 vmf->flags = old_flags;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002760 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2761 return ret;
2762 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2763 lock_page(page);
2764 if (!page->mapping) {
2765 unlock_page(page);
2766 return 0; /* retry */
2767 }
2768 ret |= VM_FAULT_LOCKED;
2769 } else
2770 VM_BUG_ON_PAGE(!PageLocked(page), page);
2771 return ret;
2772}
2773
2774/*
Jan Kara97ba0c22016-12-14 15:07:27 -08002775 * Handle dirtying of a page in shared file mapping on a write fault.
2776 *
2777 * The function expects the page to be locked and unlocks it.
2778 */
Johannes Weiner89b15332019-11-30 17:50:22 -08002779static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
Jan Kara97ba0c22016-12-14 15:07:27 -08002780{
Johannes Weiner89b15332019-11-30 17:50:22 -08002781 struct vm_area_struct *vma = vmf->vma;
Jan Kara97ba0c22016-12-14 15:07:27 -08002782 struct address_space *mapping;
Johannes Weiner89b15332019-11-30 17:50:22 -08002783 struct page *page = vmf->page;
Jan Kara97ba0c22016-12-14 15:07:27 -08002784 bool dirtied;
2785 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2786
2787 dirtied = set_page_dirty(page);
2788 VM_BUG_ON_PAGE(PageAnon(page), page);
2789 /*
2790 * Take a local copy of the address_space - page.mapping may be zeroed
2791 * by truncate after unlock_page(). The address_space itself remains
2792 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
2793 * release semantics to prevent the compiler from undoing this copying.
2794 */
2795 mapping = page_rmapping(page);
2796 unlock_page(page);
2797
Jan Kara97ba0c22016-12-14 15:07:27 -08002798 if (!page_mkwrite)
2799 file_update_time(vma->vm_file);
Johannes Weiner89b15332019-11-30 17:50:22 -08002800
2801 /*
2802 * Throttle page dirtying rate down to writeback speed.
2803 *
2804 * mapping may be NULL here because some device drivers do not
2805 * set page.mapping but still dirty their pages
2806 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002807 * Drop the mmap_lock before waiting on IO, if we can. The file
Johannes Weiner89b15332019-11-30 17:50:22 -08002808 * is pinning the mapping, as per above.
2809 */
2810 if ((dirtied || page_mkwrite) && mapping) {
2811 struct file *fpin;
2812
2813 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2814 balance_dirty_pages_ratelimited(mapping);
2815 if (fpin) {
2816 fput(fpin);
2817 return VM_FAULT_RETRY;
2818 }
2819 }
2820
2821 return 0;
Jan Kara97ba0c22016-12-14 15:07:27 -08002822}
2823
2824/*
Shachar Raindel4e047f82015-04-14 15:46:25 -07002825 * Handle write page faults for pages that can be reused in the current vma
2826 *
2827 * This can happen either due to the mapping being with the VM_SHARED flag,
2828 * or due to us being the last reference standing to the page. In either
2829 * case, all we need to do here is to mark the page as writable and update
2830 * any related book-keeping.
2831 */
Jan Kara997dd982016-12-14 15:07:36 -08002832static inline void wp_page_reuse(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08002833 __releases(vmf->ptl)
Shachar Raindel4e047f82015-04-14 15:46:25 -07002834{
Jan Kara82b0f8c2016-12-14 15:06:58 -08002835 struct vm_area_struct *vma = vmf->vma;
Jan Karaa41b70d2016-12-14 15:07:33 -08002836 struct page *page = vmf->page;
Shachar Raindel4e047f82015-04-14 15:46:25 -07002837 pte_t entry;
2838 /*
2839 * Clear the page's cpupid information as the existing
2840 * information potentially belongs to a now completely
2841 * unrelated process.
2842 */
2843 if (page)
2844 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2845
Jan Kara29943022016-12-14 15:07:16 -08002846 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2847 entry = pte_mkyoung(vmf->orig_pte);
Laurent Dufour32507b62018-04-17 16:33:18 +02002848 entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
Jan Kara82b0f8c2016-12-14 15:06:58 -08002849 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2850 update_mmu_cache(vma, vmf->address, vmf->pte);
2851 pte_unmap_unlock(vmf->pte, vmf->ptl);
Peter Xu798a6b82020-08-21 19:49:58 -04002852 count_vm_event(PGREUSE);
Shachar Raindel4e047f82015-04-14 15:46:25 -07002853}
2854
2855/*
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002856 * Handle the case of a page which we actually need to copy to a new page.
2857 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002858 * Called with mmap_lock locked and the old page referenced, but
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002859 * without the ptl held.
2860 *
2861 * High level logic flow:
2862 *
2863 * - Allocate a page, copy the content of the old page to the new one.
2864 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2865 * - Take the PTL. If the pte changed, bail out and release the allocated page
2866 * - If the pte is still the way we remember it, update the page table and all
2867 * relevant references. This includes dropping the reference the page-table
2868 * held to the old page, as well as updating the rmap.
2869 * - In any case, unlock the PTL and drop the reference we took to the old page.
2870 */
Souptick Joarder2b740302018-08-23 17:01:36 -07002871static vm_fault_t wp_page_copy(struct vm_fault *vmf)
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002872{
Jan Kara82b0f8c2016-12-14 15:06:58 -08002873 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07002874 struct mm_struct *mm = vma->vm_mm;
Jan Karaa41b70d2016-12-14 15:07:33 -08002875 struct page *old_page = vmf->page;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002876 struct page *new_page = NULL;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002877 pte_t entry;
2878 int page_copied = 0;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002879 struct mmu_notifier_range range;
Peter Zijlstraafeec972018-04-17 16:33:10 +02002880 int ret = VM_FAULT_OOM;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002881
2882 if (unlikely(anon_vma_prepare(vma)))
Peter Zijlstraafeec972018-04-17 16:33:10 +02002883 goto out;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002884
Jan Kara29943022016-12-14 15:07:16 -08002885 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08002886 new_page = alloc_zeroed_user_highpage_movable(vma,
2887 vmf->address);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002888 if (!new_page)
Peter Zijlstraafeec972018-04-17 16:33:10 +02002889 goto out;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002890 } else {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07002891 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
Jan Kara82b0f8c2016-12-14 15:06:58 -08002892 vmf->address);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002893 if (!new_page)
Peter Zijlstraafeec972018-04-17 16:33:10 +02002894 goto out;
Jia He83d116c2019-10-11 22:09:39 +08002895
2896 if (!cow_user_page(new_page, old_page, vmf)) {
2897 /*
2898 * COW failed, if the fault was solved by other,
2899 * it's fine. If not, userspace would re-fault on
2900 * the same address and we will handle the fault
2901 * from the second attempt.
2902 */
2903 put_page(new_page);
2904 if (old_page)
2905 put_page(old_page);
2906 return 0;
2907 }
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002908 }
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002909
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07002910 if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
Peter Zijlstraafeec972018-04-17 16:33:10 +02002911 goto out_free_new;
Johannes Weiner9d82c692020-06-03 16:02:04 -07002912 cgroup_throttle_swaprate(new_page, GFP_KERNEL);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002913
Mel Gormaneb3c24f2015-06-24 16:57:27 -07002914 __SetPageUptodate(new_page);
2915
Jérôme Glisse7269f992019-05-13 17:20:53 -07002916 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07002917 vmf->address & PAGE_MASK,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002918 (vmf->address & PAGE_MASK) + PAGE_SIZE);
2919 mmu_notifier_invalidate_range_start(&range);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002920
2921 /*
2922 * Re-check the pte - we dropped the lock
2923 */
Peter Zijlstraafeec972018-04-17 16:33:10 +02002924 if (!pte_map_lock(vmf)) {
2925 ret = VM_FAULT_RETRY;
2926 goto out_free_new;
2927 }
Jan Kara29943022016-12-14 15:07:16 -08002928 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002929 if (old_page) {
2930 if (!PageAnon(old_page)) {
Jerome Marchandeca56ff2016-01-14 15:19:26 -08002931 dec_mm_counter_fast(mm,
2932 mm_counter_file(old_page));
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002933 inc_mm_counter_fast(mm, MM_ANONPAGES);
2934 }
2935 } else {
2936 inc_mm_counter_fast(mm, MM_ANONPAGES);
2937 }
Jan Kara29943022016-12-14 15:07:16 -08002938 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
Laurent Dufour32507b62018-04-17 16:33:18 +02002939 entry = mk_pte(new_page, vmf->vma_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08002940 entry = pte_sw_mkyoung(entry);
Laurent Dufour32507b62018-04-17 16:33:18 +02002941 entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002942 /*
2943 * Clear the pte entry and flush it first, before updating the
2944 * pte with the new entry. This will avoid a race condition
2945 * seen in the presence of one thread doing SMC and another
2946 * thread doing COW.
2947 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08002948 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2949 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07002950 lru_cache_add_inactive_or_unevictable(new_page, vma);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002951 /*
2952 * We call the notify macro here because, when using secondary
2953 * mmu page tables (such as kvm shadow page tables), we want the
2954 * new page to be mapped directly into the secondary page table.
2955 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08002956 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2957 update_mmu_cache(vma, vmf->address, vmf->pte);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002958 if (old_page) {
2959 /*
2960 * Only after switching the pte to the new page may
2961 * we remove the mapcount here. Otherwise another
2962 * process may come and find the rmap count decremented
2963 * before the pte is switched to the new page, and
2964 * "reuse" the old page writing into it while our pte
2965 * here still points into it and can be read by other
2966 * threads.
2967 *
2968 * The critical issue is to order this
2969 * page_remove_rmap with the ptep_clear_flush above.
2970 * Those stores are ordered by (if nothing else,)
2971 * the barrier present in the atomic_add_negative
2972 * in page_remove_rmap.
2973 *
2974 * Then the TLB flush in ptep_clear_flush ensures that
2975 * no process can access the old page before the
2976 * decremented mapcount is visible. And the old page
2977 * cannot be reused until after the decremented
2978 * mapcount is visible. So transitively, TLBs to
2979 * old page will be flushed before it can be reused.
2980 */
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002981 page_remove_rmap(old_page, false);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002982 }
2983
2984 /* Free the old page.. */
2985 new_page = old_page;
2986 page_copied = 1;
2987 } else {
Bibo Mao7df67692020-05-27 10:25:18 +08002988 update_mmu_tlb(vma, vmf->address, vmf->pte);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002989 }
2990
2991 if (new_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002992 put_page(new_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002993
Jan Kara82b0f8c2016-12-14 15:06:58 -08002994 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002995 /*
2996 * No need to double call mmu_notifier->invalidate_range() callback as
2997 * the above ptep_clear_flush_notify() did already call it.
2998 */
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002999 mmu_notifier_invalidate_range_only_end(&range);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003000 if (old_page) {
3001 /*
3002 * Don't let another task, with possibly unlocked vma,
3003 * keep the mlocked page.
3004 */
Laurent Dufour32507b62018-04-17 16:33:18 +02003005 if (page_copied && (vmf->vma_flags & VM_LOCKED)) {
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003006 lock_page(old_page); /* LRU manipulation */
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08003007 if (PageMlocked(old_page))
3008 munlock_vma_page(old_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003009 unlock_page(old_page);
3010 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003011 put_page(old_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003012 }
3013 return page_copied ? VM_FAULT_WRITE : 0;
Peter Zijlstraafeec972018-04-17 16:33:10 +02003014out_free_new:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003015 put_page(new_page);
Peter Zijlstraafeec972018-04-17 16:33:10 +02003016out:
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003017 if (old_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003018 put_page(old_page);
Peter Zijlstraafeec972018-04-17 16:33:10 +02003019 return ret;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003020}
3021
Jan Kara66a61972016-12-14 15:07:39 -08003022/**
3023 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3024 * writeable once the page is prepared
3025 *
3026 * @vmf: structure describing the fault
3027 *
3028 * This function handles all that is needed to finish a write page fault in a
3029 * shared mapping due to PTE being read-only once the mapped page is prepared.
Mike Rapoporta862f682019-03-05 15:48:42 -08003030 * It handles locking of the PTE and modifying it.
Jan Kara66a61972016-12-14 15:07:39 -08003031 *
3032 * The function expects the page to be locked or other protection against
3033 * concurrent faults / writeback (such as DAX radix tree locks).
Mike Rapoporta862f682019-03-05 15:48:42 -08003034 *
3035 * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
3036 * we acquired PTE lock.
Jan Kara66a61972016-12-14 15:07:39 -08003037 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003038vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
Jan Kara66a61972016-12-14 15:07:39 -08003039{
Laurent Dufour32507b62018-04-17 16:33:18 +02003040 WARN_ON_ONCE(!(vmf->vma_flags & VM_SHARED));
Peter Zijlstraafeec972018-04-17 16:33:10 +02003041 if (!pte_map_lock(vmf))
3042 return VM_FAULT_RETRY;
Jan Kara66a61972016-12-14 15:07:39 -08003043 /*
3044 * We might have raced with another page fault while we released the
3045 * pte_offset_map_lock.
3046 */
3047 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
Bibo Mao7df67692020-05-27 10:25:18 +08003048 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
Jan Kara66a61972016-12-14 15:07:39 -08003049 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa19e2552016-12-14 15:07:42 -08003050 return VM_FAULT_NOPAGE;
Jan Kara66a61972016-12-14 15:07:39 -08003051 }
3052 wp_page_reuse(vmf);
Jan Karaa19e2552016-12-14 15:07:42 -08003053 return 0;
Jan Kara66a61972016-12-14 15:07:39 -08003054}
3055
Boaz Harroshdd906182015-04-15 16:15:11 -07003056/*
3057 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3058 * mapping
3059 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003060static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
Boaz Harroshdd906182015-04-15 16:15:11 -07003061{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003062 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003063
Boaz Harroshdd906182015-04-15 16:15:11 -07003064 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
Souptick Joarder2b740302018-08-23 17:01:36 -07003065 vm_fault_t ret;
Boaz Harroshdd906182015-04-15 16:15:11 -07003066
Jan Kara82b0f8c2016-12-14 15:06:58 -08003067 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karafe822212016-12-14 15:07:13 -08003068 vmf->flags |= FAULT_FLAG_MKWRITE;
Dave Jiang11bac802017-02-24 14:56:41 -08003069 ret = vma->vm_ops->pfn_mkwrite(vmf);
Jan Kara2f89dc12016-12-14 15:07:50 -08003070 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
Boaz Harroshdd906182015-04-15 16:15:11 -07003071 return ret;
Jan Kara66a61972016-12-14 15:07:39 -08003072 return finish_mkwrite_fault(vmf);
Boaz Harroshdd906182015-04-15 16:15:11 -07003073 }
Jan Kara997dd982016-12-14 15:07:36 -08003074 wp_page_reuse(vmf);
3075 return VM_FAULT_WRITE;
Boaz Harroshdd906182015-04-15 16:15:11 -07003076}
3077
Souptick Joarder2b740302018-08-23 17:01:36 -07003078static vm_fault_t wp_page_shared(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08003079 __releases(vmf->ptl)
Shachar Raindel93e478d2015-04-14 15:46:35 -07003080{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003081 struct vm_area_struct *vma = vmf->vma;
Johannes Weiner89b15332019-11-30 17:50:22 -08003082 vm_fault_t ret = VM_FAULT_WRITE;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003083
Jan Karaa41b70d2016-12-14 15:07:33 -08003084 get_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003085
Shachar Raindel93e478d2015-04-14 15:46:35 -07003086 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
Souptick Joarder2b740302018-08-23 17:01:36 -07003087 vm_fault_t tmp;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003088
Jan Kara82b0f8c2016-12-14 15:06:58 -08003089 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Kara38b8cb72016-12-14 15:07:30 -08003090 tmp = do_page_mkwrite(vmf);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003091 if (unlikely(!tmp || (tmp &
3092 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003093 put_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003094 return tmp;
3095 }
Jan Kara66a61972016-12-14 15:07:39 -08003096 tmp = finish_mkwrite_fault(vmf);
Jan Karaa19e2552016-12-14 15:07:42 -08003097 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003098 unlock_page(vmf->page);
Jan Karaa41b70d2016-12-14 15:07:33 -08003099 put_page(vmf->page);
Jan Kara66a61972016-12-14 15:07:39 -08003100 return tmp;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003101 }
Jan Kara66a61972016-12-14 15:07:39 -08003102 } else {
3103 wp_page_reuse(vmf);
Jan Kara997dd982016-12-14 15:07:36 -08003104 lock_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003105 }
Johannes Weiner89b15332019-11-30 17:50:22 -08003106 ret |= fault_dirty_shared_page(vmf);
Jan Kara997dd982016-12-14 15:07:36 -08003107 put_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003108
Johannes Weiner89b15332019-11-30 17:50:22 -08003109 return ret;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003110}
3111
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003112/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 * This routine handles present pages, when users try to write
3114 * to a shared page. It is done by copying the page to a new address
3115 * and decrementing the shared-page counter for the old page.
3116 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 * Note that this routine assumes that the protection checks have been
3118 * done by the caller (the low-level page fault routine in most cases).
3119 * Thus we can safely just mark it writable once we've done any necessary
3120 * COW.
3121 *
3122 * We also mark the page dirty at this point even though the page will
3123 * change only once the write actually happens. This avoids a few races,
3124 * and potentially makes it more efficient.
3125 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003126 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003127 * but allow concurrent faults), with pte both mapped and locked.
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003128 * We return with mmap_lock still held, but pte unmapped and unlocked.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003130static vm_fault_t do_wp_page(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08003131 __releases(vmf->ptl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003133 struct vm_area_struct *vma = vmf->vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134
Peter Xu292924b2020-04-06 20:05:49 -07003135 if (userfaultfd_pte_wp(vma, *vmf->pte)) {
Andrea Arcangeli529b9302020-04-06 20:05:29 -07003136 pte_unmap_unlock(vmf->pte, vmf->ptl);
3137 return handle_userfault(vmf, VM_UFFD_WP);
3138 }
3139
Jan Karaa41b70d2016-12-14 15:07:33 -08003140 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3141 if (!vmf->page) {
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003142 /*
Peter Feiner64e455072014-10-13 15:55:46 -07003143 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3144 * VM_PFNMAP VMA.
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003145 *
3146 * We should not cow pages in a shared writeable mapping.
Boaz Harroshdd906182015-04-15 16:15:11 -07003147 * Just mark the pages writable and/or call ops->pfn_mkwrite.
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003148 */
Laurent Dufour32507b62018-04-17 16:33:18 +02003149 if ((vmf->vma_flags & (VM_WRITE|VM_SHARED)) ==
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003150 (VM_WRITE|VM_SHARED))
Jan Kara29943022016-12-14 15:07:16 -08003151 return wp_pfn_shared(vmf);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003152
Jan Kara82b0f8c2016-12-14 15:06:58 -08003153 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa41b70d2016-12-14 15:07:33 -08003154 return wp_page_copy(vmf);
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003155 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003157 /*
Peter Zijlstraee6a6452006-09-25 23:31:00 -07003158 * Take out anonymous pages first, anonymous shared vmas are
3159 * not dirty accountable.
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003160 */
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003161 if (PageAnon(vmf->page)) {
Linus Torvalds09854ba2020-08-21 19:49:55 -04003162 struct page *page = vmf->page;
3163
3164 /* PageKsm() doesn't necessarily raise the page refcount */
3165 if (PageKsm(page) || page_count(page) != 1)
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003166 goto copy;
Linus Torvalds09854ba2020-08-21 19:49:55 -04003167 if (!trylock_page(page))
3168 goto copy;
3169 if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3170 unlock_page(page);
3171 goto copy;
Peter Zijlstraee6a6452006-09-25 23:31:00 -07003172 }
Linus Torvalds09854ba2020-08-21 19:49:55 -04003173 /*
3174 * Ok, we've got the only map reference, and the only
3175 * page count reference, and the page is locked,
3176 * it's dark out, and we're wearing sunglasses. Hit it.
3177 */
Linus Torvalds09854ba2020-08-21 19:49:55 -04003178 unlock_page(page);
Linus Torvaldsbe068f22020-09-24 08:41:32 -07003179 wp_page_reuse(vmf);
Linus Torvalds09854ba2020-08-21 19:49:55 -04003180 return VM_FAULT_WRITE;
Laurent Dufour32507b62018-04-17 16:33:18 +02003181 } else if (unlikely((vmf->vma_flags & (VM_WRITE|VM_SHARED)) ==
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003182 (VM_WRITE|VM_SHARED))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003183 return wp_page_shared(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 }
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003185copy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186 /*
3187 * Ok, we need to copy. Oh, well..
3188 */
Jan Karaa41b70d2016-12-14 15:07:33 -08003189 get_page(vmf->page);
Shachar Raindel28766802015-04-14 15:46:29 -07003190
Jan Kara82b0f8c2016-12-14 15:06:58 -08003191 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa41b70d2016-12-14 15:07:33 -08003192 return wp_page_copy(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193}
3194
Peter Zijlstra97a89412011-05-24 17:12:04 -07003195static void unmap_mapping_range_vma(struct vm_area_struct *vma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003196 unsigned long start_addr, unsigned long end_addr,
3197 struct zap_details *details)
3198{
Al Virof5cc4ee2012-03-05 14:14:20 -05003199 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200}
3201
Davidlohr Buesof808c132017-09-08 16:15:08 -07003202static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 struct zap_details *details)
3204{
3205 struct vm_area_struct *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 pgoff_t vba, vea, zba, zea;
3207
Michel Lespinasse6b2dbba2012-10-08 16:31:25 -07003208 vma_interval_tree_foreach(vma, root,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 details->first_index, details->last_index) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210
3211 vba = vma->vm_pgoff;
Libind6e93212013-07-03 15:01:26 -07003212 vea = vba + vma_pages(vma) - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 zba = details->first_index;
3214 if (zba < vba)
3215 zba = vba;
3216 zea = details->last_index;
3217 if (zea > vea)
3218 zea = vea;
3219
Peter Zijlstra97a89412011-05-24 17:12:04 -07003220 unmap_mapping_range_vma(vma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3222 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
Peter Zijlstra97a89412011-05-24 17:12:04 -07003223 details);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224 }
3225}
3226
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227/**
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003228 * unmap_mapping_pages() - Unmap pages from processes.
3229 * @mapping: The address space containing pages to be unmapped.
3230 * @start: Index of first page to be unmapped.
3231 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3232 * @even_cows: Whether to unmap even private COWed pages.
3233 *
3234 * Unmap the pages in this address space from any userspace process which
3235 * has them mmaped. Generally, you want to remove COWed pages as well when
3236 * a file is being truncated, but not when invalidating pages from the page
3237 * cache.
3238 */
3239void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3240 pgoff_t nr, bool even_cows)
3241{
3242 struct zap_details details = { };
3243
3244 details.check_mapping = even_cows ? NULL : mapping;
3245 details.first_index = start;
3246 details.last_index = start + nr - 1;
3247 if (details.last_index < details.first_index)
3248 details.last_index = ULONG_MAX;
3249
3250 i_mmap_lock_write(mapping);
3251 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3252 unmap_mapping_range_tree(&mapping->i_mmap, &details);
3253 i_mmap_unlock_write(mapping);
3254}
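/*
 * Illustrative sketch, not part of this file: a filesystem invalidating
 * the user mappings of a range of page-cache pages it is about to
 * re-read from disk, while leaving private COW copies alone ('index'
 * and 'npages' are hypothetical):
 *
 *	unmap_mapping_pages(inode->i_mapping, index, npages, false);
 */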
3255
3256/**
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08003257 * unmap_mapping_range - unmap the portion of all mmaps in the specified
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003258 * address_space corresponding to the specified byte range in the underlying
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08003259 * file.
3260 *
Martin Waitz3d410882005-06-23 22:05:21 -07003261 * @mapping: the address space containing mmaps to be unmapped.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 * @holebegin: byte in first page to unmap, relative to the start of
3263 * the underlying file. This will be rounded down to a PAGE_SIZE
npiggin@suse.de25d9e2d2009-08-21 02:35:05 +10003264 * boundary. Note that this is different from truncate_pagecache(), which
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265 * must keep the partial page. In contrast, we must get rid of
3266 * partial pages.
3267 * @holelen: size of prospective hole in bytes. This will be rounded
3268 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3269 * end of the file.
3270 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3271 * but 0 when invalidating pagecache, don't throw away private data.
3272 */
3273void unmap_mapping_range(struct address_space *mapping,
3274 loff_t const holebegin, loff_t const holelen, int even_cows)
3275{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276 pgoff_t hba = holebegin >> PAGE_SHIFT;
3277 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3278
3279 /* Check for overflow. */
3280 if (sizeof(holelen) > sizeof(hlen)) {
3281 long long holeend =
3282 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3283 if (holeend & ~(long long)ULONG_MAX)
3284 hlen = ULONG_MAX - hba + 1;
3285 }
3286
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003287 unmap_mapping_pages(mapping, hba, hlen, even_cows);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288}
3289EXPORT_SYMBOL(unmap_mapping_range);
3290
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003292 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003293 * but allow concurrent faults), and pte mapped but not yet locked.
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003294 * We return with pte unmapped and unlocked.
3295 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003296 * We return with the mmap_lock locked or unlocked in the same cases
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003297 * as does filemap_fault().
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003299vm_fault_t do_swap_page(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003301 struct vm_area_struct *vma = vmf->vma;
Minchan Kimeaf649eb2018-04-05 16:23:39 -07003302 struct page *page = NULL, *swapcache;
Hugh Dickins65500d22005-10-29 18:15:59 -07003303 swp_entry_t entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304 pte_t pte;
Michel Lespinassed065bd82010-10-26 14:21:57 -07003305 int locked;
Rik van Rielad8c2ee2010-08-09 17:19:48 -07003306 int exclusive = 0;
Laurent Dufour5835d872018-04-17 16:33:12 +02003307 vm_fault_t ret;
Joonsoo Kimaae466b2020-08-11 18:30:50 -07003308 void *shadow = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309
Laurent Dufour5835d872018-04-17 16:33:12 +02003310 ret = pte_unmap_same(vmf);
3311 if (ret) {
3312 /*
3313 * If pte != orig_pte, this means another thread did the
3314 * swap operation in our back.
3315 * So nothing else to do.
3316 */
3317 if (ret == VM_FAULT_PTNOTSAME)
3318 ret = 0;
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003319 goto out;
Laurent Dufour5835d872018-04-17 16:33:12 +02003320 }
Hugh Dickins65500d22005-10-29 18:15:59 -07003321
Jan Kara29943022016-12-14 15:07:16 -08003322 entry = pte_to_swp_entry(vmf->orig_pte);
Andi Kleend1737fd2009-09-16 11:50:06 +02003323 if (unlikely(non_swap_entry(entry))) {
3324 if (is_migration_entry(entry)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003325 migration_entry_wait(vma->vm_mm, vmf->pmd,
3326 vmf->address);
Jérôme Glisse5042db42017-09-08 16:11:43 -07003327 } else if (is_device_private_entry(entry)) {
Christoph Hellwig897e6362019-06-26 14:27:11 +02003328 vmf->page = device_private_entry_to_page(entry);
3329 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
Andi Kleend1737fd2009-09-16 11:50:06 +02003330 } else if (is_hwpoison_entry(entry)) {
3331 ret = VM_FAULT_HWPOISON;
3332 } else {
Jan Kara29943022016-12-14 15:07:16 -08003333 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
Hugh Dickinsd99be1a2009-12-14 17:59:04 -08003334 ret = VM_FAULT_SIGBUS;
Andi Kleend1737fd2009-09-16 11:50:06 +02003335 }
Christoph Lameter06972122006-06-23 02:03:35 -07003336 goto out;
3337 }
Minchan Kim0bcac062017-11-15 17:33:07 -08003338
3339
Shailabh Nagar0ff92242006-07-14 00:24:37 -07003340 delayacct_set_flag(DELAYACCT_PF_SWAPIN);
Minchan Kimeaf649eb2018-04-05 16:23:39 -07003341 page = lookup_swap_cache(entry, vma, vmf->address);
3342 swapcache = page;
Minchan Kimf8020772018-01-18 16:33:50 -08003343
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344 if (!page) {
Minchan Kim0bcac062017-11-15 17:33:07 -08003345 struct swap_info_struct *si = swp_swap_info(entry);
3346
Qian Caia449bf52020-08-14 17:31:31 -07003347 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3348 __swap_count(entry) == 1) {
Minchan Kim0bcac062017-11-15 17:33:07 -08003349 /* skip swapcache */
Chris Goldsworthy62e32cf2020-11-09 22:26:47 -08003350 gfp_t flags = GFP_HIGHUSER_MOVABLE;
3351
3352 trace_android_rvh_set_skip_swapcache_flags(&flags);
3353 page = alloc_page_vma(flags, vma, vmf->address);
Minchan Kim0bcac062017-11-15 17:33:07 -08003354 if (page) {
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003355 int err;
3356
Minchan Kim0bcac062017-11-15 17:33:07 -08003357 __SetPageLocked(page);
3358 __SetPageSwapBacked(page);
3359 set_page_private(page, entry.val);
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003360
3361 /* Tell memcg to use swap ownership records */
3362 SetPageSwapCache(page);
3363 err = mem_cgroup_charge(page, vma->vm_mm,
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07003364 GFP_KERNEL);
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003365 ClearPageSwapCache(page);
Michal Hocko545b1b02020-06-25 20:29:21 -07003366 if (err) {
3367 ret = VM_FAULT_OOM;
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003368 goto out_page;
Michal Hocko545b1b02020-06-25 20:29:21 -07003369 }
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003370
Joonsoo Kimaae466b2020-08-11 18:30:50 -07003371 shadow = get_shadow_from_swap_cache(entry);
3372 if (shadow)
3373 workingset_refault(page, shadow);
Minchan Kim0bcac062017-11-15 17:33:07 -08003374
Johannes Weiner6058eae2020-06-03 16:02:40 -07003375 lru_cache_add(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003376 swap_readpage(page, true);
3377 }
Minchan Kimaa8d22a2017-11-15 17:33:11 -08003378 } else {
Minchan Kime9e9b7e2018-04-05 16:23:42 -07003379 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3380 vmf);
Minchan Kimaa8d22a2017-11-15 17:33:11 -08003381 swapcache = page;
Minchan Kim0bcac062017-11-15 17:33:07 -08003382 }
3383
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 if (!page) {
3385 /*
Peter Zijlstraafeec972018-04-17 16:33:10 +02003386 * Back out if the VMA has changed behind our back during
3387 * a speculative page fault or if somebody else
3388 * faulted in this pte while we released the pte lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389 */
Peter Zijlstraafeec972018-04-17 16:33:10 +02003390 if (!pte_map_lock(vmf)) {
3391 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3392 ret = VM_FAULT_RETRY;
3393 goto out;
3394 }
3395
Jan Kara29943022016-12-14 15:07:16 -08003396 if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 ret = VM_FAULT_OOM;
Shailabh Nagar0ff92242006-07-14 00:24:37 -07003398 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Hugh Dickins65500d22005-10-29 18:15:59 -07003399 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 }
3401
3402 /* Had to read the page from swap area: Major fault */
3403 ret = VM_FAULT_MAJOR;
Christoph Lameterf8891e52006-06-30 01:55:45 -07003404 count_vm_event(PGMAJFAULT);
Roman Gushchin22621852017-07-06 15:40:25 -07003405 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
Andi Kleend1737fd2009-09-16 11:50:06 +02003406 } else if (PageHWPoison(page)) {
Wu Fengguang71f72522009-12-16 12:19:58 +01003407 /*
3408 * hwpoisoned dirty swapcache pages are kept for killing
3409 * owner processes (which may be unknown at hwpoison time)
3410 */
Andi Kleend1737fd2009-09-16 11:50:06 +02003411 ret = VM_FAULT_HWPOISON;
3412 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Andi Kleen4779cb32009-10-14 01:51:41 +02003413 goto out_release;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414 }
3415
Jan Kara82b0f8c2016-12-14 15:06:58 -08003416 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
Rik van Riele709ffd2012-05-29 15:06:18 -07003417
Balbir Singh20a10222007-11-14 17:00:33 -08003418 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Michel Lespinassed065bd82010-10-26 14:21:57 -07003419 if (!locked) {
3420 ret |= VM_FAULT_RETRY;
3421 goto out_release;
3422 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003424 /*
Hugh Dickins31c4a3d2010-09-19 19:40:22 -07003425 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3426 * release the swapcache from under us. The page pin, and pte_same
3427 * test below, are not enough to exclude that. Even if it is still
3428 * swapcache, we need to check that the page's swap has not changed.
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003429 */
Minchan Kim0bcac062017-11-15 17:33:07 -08003430 if (unlikely((!PageSwapCache(page) ||
3431 page_private(page) != entry.val)) && swapcache)
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003432 goto out_page;
3433
Jan Kara82b0f8c2016-12-14 15:06:58 -08003434 page = ksm_might_need_to_copy(page, vma, vmf->address);
Hugh Dickinscbf86cf2013-02-22 16:35:08 -08003435 if (unlikely(!page)) {
3436 ret = VM_FAULT_OOM;
3437 page = swapcache;
Hugh Dickinscbf86cf2013-02-22 16:35:08 -08003438 goto out_page;
Hugh Dickins5ad64682009-12-14 17:59:24 -08003439 }
3440
Johannes Weiner9d82c692020-06-03 16:02:04 -07003441 cgroup_throttle_swaprate(page, GFP_KERNEL);
KAMEZAWA Hiroyuki073e5872008-10-18 20:28:08 -07003442
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443 /*
Peter Zijlstraafeec972018-04-17 16:33:10 +02003444 * Back out if the VMA has changed behind our back during a speculative
3445 * page fault or if somebody else already faulted in this pte.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446 */
Peter Zijlstraafeec972018-04-17 16:33:10 +02003447 if (!pte_map_lock(vmf)) {
3448 ret = VM_FAULT_RETRY;
3449 goto out_page;
3450 }
Jan Kara29943022016-12-14 15:07:16 -08003451 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
Kirill Korotaevb8107482005-05-16 21:53:50 -07003452 goto out_nomap;
Kirill Korotaevb8107482005-05-16 21:53:50 -07003453
3454 if (unlikely(!PageUptodate(page))) {
3455 ret = VM_FAULT_SIGBUS;
3456 goto out_nomap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457 }
3458
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003459 /*
3460 * The page isn't present yet, go ahead with the fault.
3461 *
3462 * Be careful about the sequence of operations here.
3463 * To get its accounting right, reuse_swap_page() must be called
3464 * while the page is counted on swap but not yet in mapcount i.e.
3465 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3466 * must be called after the swap_free(), or it will never succeed.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003467 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003469 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3470 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
Laurent Dufour32507b62018-04-17 16:33:18 +02003471 pte = mk_pte(page, vmf->vma_page_prot);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003472 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
Laurent Dufour32507b62018-04-17 16:33:18 +02003473 pte = maybe_mkwrite(pte_mkdirty(pte), vmf->vma_flags);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003474 vmf->flags &= ~FAULT_FLAG_WRITE;
Andrea Arcangeli9a5b4892010-08-09 17:19:49 -07003475 ret |= VM_FAULT_WRITE;
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08003476 exclusive = RMAP_EXCLUSIVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478 flush_icache_page(vma, page);
Jan Kara29943022016-12-14 15:07:16 -08003479 if (pte_swp_soft_dirty(vmf->orig_pte))
Cyrill Gorcunov179ef712013-08-13 16:00:49 -07003480 pte = pte_mksoft_dirty(pte);
Peter Xuf45ec5f2020-04-06 20:06:01 -07003481 if (pte_swp_uffd_wp(vmf->orig_pte)) {
3482 pte = pte_mkuffd_wp(pte);
3483 pte = pte_wrprotect(pte);
3484 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08003485 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
Khalid Azizca827d52018-02-21 10:15:44 -07003486 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
Jan Kara29943022016-12-14 15:07:16 -08003487 vmf->orig_pte = pte;
Minchan Kim0bcac062017-11-15 17:33:07 -08003488
3489 /* ksm created a completely new copy */
3490 if (unlikely(page != swapcache && swapcache)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003491 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003492 lru_cache_add_inactive_or_unevictable(page, vma);
Minchan Kim0bcac062017-11-15 17:33:07 -08003493 } else {
3494 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
Johannes Weiner00501b52014-08-08 14:19:20 -07003495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003497 swap_free(entry);
Vladimir Davydov5ccc5ab2016-01-20 15:03:10 -08003498 if (mem_cgroup_swap_full(page) ||
Laurent Dufour32507b62018-04-17 16:33:18 +02003499 (vmf->vma_flags & VM_LOCKED) || PageMlocked(page))
Hugh Dickinsa2c43ee2009-01-06 14:39:36 -08003500 try_to_free_swap(page);
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003501 unlock_page(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003502 if (page != swapcache && swapcache) {
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003503 /*
3504 * Hold the lock to prevent the swap entry from being reused
3505 * until we take the PT lock for the pte_same() check
3506 * (to avoid false positives from pte_same). For
3507 * further safety, release the lock after the swap_free()
3508 * so that the swap count won't change under a
3509 * parallel locked swapcache.
3510 */
3511 unlock_page(swapcache);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003512 put_page(swapcache);
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003513 }
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003514
Jan Kara82b0f8c2016-12-14 15:06:58 -08003515 if (vmf->flags & FAULT_FLAG_WRITE) {
Jan Kara29943022016-12-14 15:07:16 -08003516 ret |= do_wp_page(vmf);
Hugh Dickins61469f12008-03-04 14:29:04 -08003517 if (ret & VM_FAULT_ERROR)
3518 ret &= VM_FAULT_ERROR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 goto out;
3520 }
3521
3522 /* No need to invalidate - it was non-present before */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003523 update_mmu_cache(vma, vmf->address, vmf->pte);
Hugh Dickins65500d22005-10-29 18:15:59 -07003524unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003525 pte_unmap_unlock(vmf->pte, vmf->ptl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526out:
3527 return ret;
Kirill Korotaevb8107482005-05-16 21:53:50 -07003528out_nomap:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003529 pte_unmap_unlock(vmf->pte, vmf->ptl);
Johannes Weinerbc43f752009-04-30 15:08:08 -07003530out_page:
Kirill Korotaevb8107482005-05-16 21:53:50 -07003531 unlock_page(page);
Andi Kleen4779cb32009-10-14 01:51:41 +02003532out_release:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003533 put_page(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003534 if (page != swapcache && swapcache) {
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003535 unlock_page(swapcache);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003536 put_page(swapcache);
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003537 }
Hugh Dickins65500d22005-10-29 18:15:59 -07003538 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539}
3540
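For illustration only (an editor's sketch, not part of the kernel source): a minimal userspace program that typically exercises the swap-in path handled above, assuming swap space is configured and the kernel/headers expose MADV_PAGEOUT. It populates an anonymous page, asks the kernel to reclaim it, and then touches it again so the access faults into do_swap_page().

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21			/* UAPI value; older headers may lack it */
#endif

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	memset(p, 0x5a, len);			/* populate the page */

	/* Ask the kernel to reclaim it; with swap available it is swapped out. */
	if (madvise(p, len, MADV_PAGEOUT))
		perror("madvise");

	/* The next touch faults and is serviced by the swap-in path. */
	printf("byte after swap-in: %#x\n", (unsigned char)p[0]);

	munmap(p, len);
	return 0;
}
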
3541/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003542 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003543 * but allow concurrent faults), and pte mapped but not yet locked.
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003544 * We return with mmap_lock still held, but pte unmapped and unlocked.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003546static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003548 struct vm_area_struct *vma = vmf->vma;
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003549 struct page *page;
Souptick Joarder2b740302018-08-23 17:01:36 -07003550 vm_fault_t ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551 pte_t entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552
Kirill A. Shutemov6b7339f2015-07-06 23:18:37 +03003553 /* File mapping without ->vm_ops ? */
Laurent Dufour32507b62018-04-17 16:33:18 +02003554 if (vmf->vma_flags & VM_SHARED)
Kirill A. Shutemov6b7339f2015-07-06 23:18:37 +03003555 return VM_FAULT_SIGBUS;
3556
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003557 /*
3558 * Use pte_alloc() instead of pte_alloc_map(). We can't run
3559 * pte_offset_map() on pmds where a huge pmd might be created
3560 * from a different thread.
3561 *
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07003562 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003563 * parallel threads are excluded by other means.
3564 *
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07003565 * Here we only have mmap_read_lock(mm).
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003566 */
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003567 if (pte_alloc(vma->vm_mm, vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003568 return VM_FAULT_OOM;
3569
3570 /* See the comment in pte_alloc_one_map() */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003571 if (unlikely(pmd_trans_unstable(vmf->pmd)))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003572 return 0;
3573
Linus Torvalds11ac5522010-08-14 11:44:56 -07003574 /* Use the zero-page for reads */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003575 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003576 !mm_forbids_zeropage(vma->vm_mm)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003577 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
Laurent Dufour32507b62018-04-17 16:33:18 +02003578 vmf->vma_page_prot));
Peter Zijlstraafeec972018-04-17 16:33:10 +02003579 if (!pte_map_lock(vmf))
3580 return VM_FAULT_RETRY;
Bibo Mao7df67692020-05-27 10:25:18 +08003581 if (!pte_none(*vmf->pte)) {
3582 update_mmu_tlb(vma, vmf->address, vmf->pte);
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003583 goto unlock;
Bibo Mao7df67692020-05-27 10:25:18 +08003584 }
Michal Hocko6b31d592017-08-18 15:16:15 -07003585 ret = check_stable_address_space(vma->vm_mm);
3586 if (ret)
3587 goto unlock;
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003588 /* Deliver the page fault to userland, check inside PT lock */
3589 if (userfaultfd_missing(vma)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003590 pte_unmap_unlock(vmf->pte, vmf->ptl);
3591 return handle_userfault(vmf, VM_UFFD_MISSING);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003592 }
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003593 goto setpte;
3594 }
3595
Nick Piggin557ed1f2007-10-16 01:24:40 -07003596 /* Allocate our own private page. */
Nick Piggin557ed1f2007-10-16 01:24:40 -07003597 if (unlikely(anon_vma_prepare(vma)))
3598 goto oom;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003599 page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
Nick Piggin557ed1f2007-10-16 01:24:40 -07003600 if (!page)
3601 goto oom;
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003602
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07003603 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003604 goto oom_free_page;
Johannes Weiner9d82c692020-06-03 16:02:04 -07003605 cgroup_throttle_swaprate(page, GFP_KERNEL);
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003606
Minchan Kim52f37622013-04-29 15:08:15 -07003607 /*
3608 * The memory barrier inside __SetPageUptodate makes sure that
Wei Yangf4f53292019-11-30 17:58:17 -08003609 * preceding stores to the page contents become visible before
Minchan Kim52f37622013-04-29 15:08:15 -07003610 * the set_pte_at() write.
3611 */
Nick Piggin0ed361d2008-02-04 22:29:34 -08003612 __SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613
Laurent Dufour32507b62018-04-17 16:33:18 +02003614 entry = mk_pte(page, vmf->vma_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08003615 entry = pte_sw_mkyoung(entry);
Laurent Dufour32507b62018-04-17 16:33:18 +02003616 if (vmf->vma_flags & VM_WRITE)
Hugh Dickins1ac0cb52009-09-21 17:03:29 -07003617 entry = pte_mkwrite(pte_mkdirty(entry));
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003618
Peter Zijlstraafeec972018-04-17 16:33:10 +02003619 if (!pte_map_lock(vmf)) {
3620 ret = VM_FAULT_RETRY;
3621 goto release;
3622 }
3623
Bibo Mao7df67692020-05-27 10:25:18 +08003624 if (!pte_none(*vmf->pte)) {
3625 update_mmu_cache(vma, vmf->address, vmf->pte);
Peter Zijlstraafeec972018-04-17 16:33:10 +02003626 goto unlock_and_release;
Bibo Mao7df67692020-05-27 10:25:18 +08003627 }
Hugh Dickins9ba69292009-09-21 17:02:20 -07003628
Michal Hocko6b31d592017-08-18 15:16:15 -07003629 ret = check_stable_address_space(vma->vm_mm);
3630 if (ret)
Peter Zijlstraafeec972018-04-17 16:33:10 +02003631 goto unlock_and_release;
Michal Hocko6b31d592017-08-18 15:16:15 -07003632
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003633 /* Deliver the page fault to userland, check inside PT lock */
3634 if (userfaultfd_missing(vma)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003635 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003636 put_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003637 return handle_userfault(vmf, VM_UFFD_MISSING);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003638 }
3639
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003640 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003641 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003642 lru_cache_add_inactive_or_unevictable(page, vma);
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003643setpte:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003644 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645
3646 /* No need to invalidate - it was non-present before */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003647 update_mmu_cache(vma, vmf->address, vmf->pte);
Hugh Dickins65500d22005-10-29 18:15:59 -07003648unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003649 pte_unmap_unlock(vmf->pte, vmf->ptl);
Michal Hocko6b31d592017-08-18 15:16:15 -07003650 return ret;
Peter Zijlstraafeec972018-04-17 16:33:10 +02003651unlock_and_release:
3652 pte_unmap_unlock(vmf->pte, vmf->ptl);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003653release:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003654 put_page(page);
Peter Zijlstraafeec972018-04-17 16:33:10 +02003655 return ret;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003656oom_free_page:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003657 put_page(page);
Hugh Dickins65500d22005-10-29 18:15:59 -07003658oom:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003659 return VM_FAULT_OOM;
3660}
3661
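For illustration only, a hedged userspace sketch of how do_anonymous_page() is commonly reached: a private anonymous mapping has no ->vm_ops, so on most configurations the first read of an untouched page is backed by the shared zero page and the first write allocates a private zeroed page.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	/* Private anonymous mapping: no ->vm_ops, faults reach do_anonymous_page(). */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Read fault: usually satisfied with the shared zero page. */
	printf("before write: %d\n", p[0]);

	/* Write fault: a private zeroed page is allocated and mapped writable. */
	memset(p, 0xaa, len);
	printf("after write: %#x\n", (unsigned char)p[0]);

	munmap(p, len);
	return 0;
}
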
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003662/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003663 * The mmap_lock must have been held on entry, and may have been
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003664 * released depending on flags and vma->vm_ops->fault() return value.
3665 * See filemap_fault() and __lock_page_or_retry().
3666 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003667static vm_fault_t __do_fault(struct vm_fault *vmf)
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003668{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003669 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07003670 vm_fault_t ret;
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003671
Michal Hocko63f36552019-01-08 15:23:07 -08003672 /*
3673 * Preallocate the pte before taking the page lock: allocating under the
3674 * page lock could deadlock against memcg reclaim, which waits for pages under writeback:
3675 * lock_page(A)
3676 * SetPageWriteback(A)
3677 * unlock_page(A)
3678 * lock_page(B)
3679 * lock_page(B)
Yanfei Xud3838072020-10-13 16:53:26 -07003680 * pte_alloc_one
Michal Hocko63f36552019-01-08 15:23:07 -08003681 * shrink_page_list
3682 * wait_on_page_writeback(A)
3683 * SetPageWriteback(B)
3684 * unlock_page(B)
3685 * # flush A, B to clear the writeback
3686 */
3687 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
Yanfei Xua7069ee2020-10-13 16:53:29 -07003688 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
Michal Hocko63f36552019-01-08 15:23:07 -08003689 if (!vmf->prealloc_pte)
3690 return VM_FAULT_OOM;
3691 smp_wmb(); /* See comment in __pte_alloc() */
3692 }
3693
Dave Jiang11bac802017-02-24 14:56:41 -08003694 ret = vma->vm_ops->fault(vmf);
Jan Kara39170482016-12-14 15:07:18 -08003695 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
Jan Karab1aa8122016-12-14 15:07:24 -08003696 VM_FAULT_DONE_COW)))
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003697 return ret;
3698
Jan Kara667240e2016-12-14 15:07:07 -08003699 if (unlikely(PageHWPoison(vmf->page))) {
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003700 if (ret & VM_FAULT_LOCKED)
Jan Kara667240e2016-12-14 15:07:07 -08003701 unlock_page(vmf->page);
3702 put_page(vmf->page);
Jan Kara936ca802016-12-14 15:07:10 -08003703 vmf->page = NULL;
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003704 return VM_FAULT_HWPOISON;
3705 }
3706
3707 if (unlikely(!(ret & VM_FAULT_LOCKED)))
Jan Kara667240e2016-12-14 15:07:07 -08003708 lock_page(vmf->page);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003709 else
Jan Kara667240e2016-12-14 15:07:07 -08003710 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003711
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003712 return ret;
3713}
3714
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003715/*
3716 * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
3717 * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
3718 * inside pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
3719 * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
3720 */
3721static int pmd_devmap_trans_unstable(pmd_t *pmd)
3722{
3723 return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
3724}
3725
Souptick Joarder2b740302018-08-23 17:01:36 -07003726static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003727{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003728 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003729
Jan Kara82b0f8c2016-12-14 15:06:58 -08003730 if (!pmd_none(*vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003731 goto map_pte;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003732 if (vmf->prealloc_pte) {
3733 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3734 if (unlikely(!pmd_none(*vmf->pmd))) {
3735 spin_unlock(vmf->ptl);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003736 goto map_pte;
3737 }
3738
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08003739 mm_inc_nr_ptes(vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003740 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3741 spin_unlock(vmf->ptl);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08003742 vmf->prealloc_pte = NULL;
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003743 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003744 return VM_FAULT_OOM;
3745 }
3746map_pte:
3747 /*
3748 * If a huge pmd materialized under us just retry later. Use
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003749 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
3750 * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
3751 * under us and then back to pmd_none, as a result of MADV_DONTNEED
3752 * running immediately after a huge pmd fault in a different thread of
3753 * this mm, in turn leading to a misleading pmd_trans_huge() retval.
3754 * All we have to ensure is that it is a regular pmd that we can walk
3755 * with pte_offset_map() and we can do that through an atomic read in
3756 * C, which is what pmd_trans_unstable() provides.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003757 */
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003758 if (pmd_devmap_trans_unstable(vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003759 return VM_FAULT_NOPAGE;
3760
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003761 /*
3762 * At this point we know that our vmf->pmd points to a page of ptes
3763 * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
3764 * for the duration of the fault. If a racing MADV_DONTNEED runs and
3765 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3766 * be valid and we will re-check to make sure the vmf->pte isn't
3767 * pte_none() under vmf->ptl protection when we return to
3768 * alloc_set_pte().
3769 */
Peter Zijlstraafeec972018-04-17 16:33:10 +02003770 if (!pte_map_lock(vmf))
3771 return VM_FAULT_RETRY;
3772
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003773 return 0;
3774}
3775
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07003776#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Jan Kara82b0f8c2016-12-14 15:06:58 -08003777static void deposit_prealloc_pte(struct vm_fault *vmf)
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003778{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003779 struct vm_area_struct *vma = vmf->vma;
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003780
Jan Kara82b0f8c2016-12-14 15:06:58 -08003781 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003782 /*
3783 * We are going to consume the prealloc table,
3784 * count that as nr_ptes.
3785 */
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08003786 mm_inc_nr_ptes(vma->vm_mm);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08003787 vmf->prealloc_pte = NULL;
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003788}
3789
Souptick Joarder2b740302018-08-23 17:01:36 -07003790static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003791{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003792 struct vm_area_struct *vma = vmf->vma;
3793 bool write = vmf->flags & FAULT_FLAG_WRITE;
3794 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003795 pmd_t entry;
Souptick Joarder2b740302018-08-23 17:01:36 -07003796 int i;
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003797 vm_fault_t ret = VM_FAULT_FALLBACK;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003798
3799 if (!transhuge_vma_suitable(vma, haddr))
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003800 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003801
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003802 page = compound_head(page);
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003803 if (compound_order(page) != HPAGE_PMD_ORDER)
3804 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003805
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003806 /*
3807 * Archs like ppc64 need additonal space to store information
3808 * related to pte entry. Use the preallocated table for that.
3809 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003810 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003811 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003812 if (!vmf->prealloc_pte)
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003813 return VM_FAULT_OOM;
3814 smp_wmb(); /* See comment in __pte_alloc() */
3815 }
3816
Jan Kara82b0f8c2016-12-14 15:06:58 -08003817 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3818 if (unlikely(!pmd_none(*vmf->pmd)))
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003819 goto out;
3820
3821 for (i = 0; i < HPAGE_PMD_NR; i++)
3822 flush_icache_page(vma, page + i);
3823
Laurent Dufour32507b62018-04-17 16:33:18 +02003824 entry = mk_huge_pmd(page, vmf->vma_page_prot);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003825 if (write)
Linus Torvaldsf55e1012017-11-29 09:01:01 -08003826 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003827
Yang Shifadae292018-08-17 15:44:55 -07003828 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003829 page_add_file_rmap(page, true);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003830 /*
3831 * deposit and withdraw with pmd lock held
3832 */
3833 if (arch_needs_pgtable_deposit())
Jan Kara82b0f8c2016-12-14 15:06:58 -08003834 deposit_prealloc_pte(vmf);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003835
Jan Kara82b0f8c2016-12-14 15:06:58 -08003836 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003837
Jan Kara82b0f8c2016-12-14 15:06:58 -08003838 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003839
3840 /* fault is handled */
3841 ret = 0;
Kirill A. Shutemov95ecedc2016-07-26 15:25:31 -07003842 count_vm_event(THP_FILE_MAPPED);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003843out:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003844 spin_unlock(vmf->ptl);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003845 return ret;
3846}
3847#else
Souptick Joarder2b740302018-08-23 17:01:36 -07003848static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003849{
3850 BUILD_BUG();
3851 return 0;
3852}
3853#endif
3854
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003855/**
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003856 * alloc_set_pte - set up a new PTE entry for the given page and add a reverse page
Randy Dunlapf1dc1682020-10-13 16:54:01 -07003857 * mapping. If needed, the function allocates a page table or uses a pre-allocated one.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003858 *
Jan Kara82b0f8c2016-12-14 15:06:58 -08003859 * @vmf: fault environment
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003860 * @page: page to map
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003861 *
Jan Kara82b0f8c2016-12-14 15:06:58 -08003862 * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3863 * return.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003864 *
3865 * Target users are page handler itself and implementations of
3866 * vm_ops->map_pages.
Mike Rapoporta862f682019-03-05 15:48:42 -08003867 *
3868 * Return: %0 on success, %VM_FAULT_ code in case of error.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003869 */
Johannes Weiner9d82c692020-06-03 16:02:04 -07003870vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003871{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003872 struct vm_area_struct *vma = vmf->vma;
3873 bool write = vmf->flags & FAULT_FLAG_WRITE;
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003874 pte_t entry;
Souptick Joarder2b740302018-08-23 17:01:36 -07003875 vm_fault_t ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003876
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07003877 if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003878 ret = do_set_pmd(vmf, page);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003879 if (ret != VM_FAULT_FALLBACK)
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003880 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003881 }
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003882
Jan Kara82b0f8c2016-12-14 15:06:58 -08003883 if (!vmf->pte) {
3884 ret = pte_alloc_one_map(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003885 if (ret)
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003886 return ret;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003887 }
3888
3889 /* Re-check under ptl */
Bibo Mao7df67692020-05-27 10:25:18 +08003890 if (unlikely(!pte_none(*vmf->pte))) {
3891 update_mmu_tlb(vma, vmf->address, vmf->pte);
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003892 return VM_FAULT_NOPAGE;
Bibo Mao7df67692020-05-27 10:25:18 +08003893 }
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003894
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003895 flush_icache_page(vma, page);
Laurent Dufour32507b62018-04-17 16:33:18 +02003896 entry = mk_pte(page, vmf->vma_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08003897 entry = pte_sw_mkyoung(entry);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003898 if (write)
Laurent Dufour32507b62018-04-17 16:33:18 +02003899 entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003900 /* copy-on-write page */
Laurent Dufour32507b62018-04-17 16:33:18 +02003901 if (write && !(vmf->vma_flags & VM_SHARED)) {
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003902 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003903 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003904 lru_cache_add_inactive_or_unevictable(page, vma);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003905 } else {
Jerome Marchandeca56ff2016-01-14 15:19:26 -08003906 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07003907 page_add_file_rmap(page, false);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003908 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08003909 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003910
3911 /* no need to invalidate: a not-present page won't be cached */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003912 update_mmu_cache(vma, vmf->address, vmf->pte);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003913
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003914 return 0;
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003915}
3916
Jan Kara9118c0c2016-12-14 15:07:21 -08003917
3918/**
3919 * finish_fault - finish page fault once we have prepared the page to fault
3920 *
3921 * @vmf: structure describing the fault
3922 *
3923 * This function handles all that is needed to finish a page fault once the
3924 * page to fault in is prepared. It handles locking of PTEs, inserts the PTE for
3925 * the given page, adds the reverse page mapping, and handles memcg charges and LRU
Mike Rapoporta862f682019-03-05 15:48:42 -08003926 * addition.
Jan Kara9118c0c2016-12-14 15:07:21 -08003927 *
3928 * The function expects the page to be locked and on success it consumes a
3929 * reference to the page being mapped (for the PTE which maps it).
Mike Rapoporta862f682019-03-05 15:48:42 -08003930 *
3931 * Return: %0 on success, %VM_FAULT_ code in case of error.
Jan Kara9118c0c2016-12-14 15:07:21 -08003932 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003933vm_fault_t finish_fault(struct vm_fault *vmf)
Jan Kara9118c0c2016-12-14 15:07:21 -08003934{
3935 struct page *page;
Souptick Joarder2b740302018-08-23 17:01:36 -07003936 vm_fault_t ret = 0;
Jan Kara9118c0c2016-12-14 15:07:21 -08003937
3938 /* Did we COW the page? */
3939 if ((vmf->flags & FAULT_FLAG_WRITE) &&
Laurent Dufour32507b62018-04-17 16:33:18 +02003940 !(vmf->vma_flags & VM_SHARED))
Jan Kara9118c0c2016-12-14 15:07:21 -08003941 page = vmf->cow_page;
3942 else
3943 page = vmf->page;
Michal Hocko6b31d592017-08-18 15:16:15 -07003944
3945 /*
3946 * check even for read faults because we might have lost our CoWed
3947 * page
3948 */
3949 if (!(vmf->vma->vm_flags & VM_SHARED))
3950 ret = check_stable_address_space(vmf->vma->vm_mm);
3951 if (!ret)
Johannes Weiner9d82c692020-06-03 16:02:04 -07003952 ret = alloc_set_pte(vmf, page);
Jan Kara9118c0c2016-12-14 15:07:21 -08003953 if (vmf->pte)
3954 pte_unmap_unlock(vmf->pte, vmf->ptl);
3955 return ret;
3956}
3957
Kirill A. Shutemov3a910532014-08-06 16:08:07 -07003958static unsigned long fault_around_bytes __read_mostly =
3959 rounddown_pow_of_two(65536);
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003960
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003961#ifdef CONFIG_DEBUG_FS
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003962static int fault_around_bytes_get(void *data, u64 *val)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003963{
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003964 *val = fault_around_bytes;
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003965 return 0;
3966}
3967
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003968/*
William Kucharskida391d62018-01-31 16:21:11 -08003969 * fault_around_bytes must be rounded down to the nearest page order as it's
3970 * what do_fault_around() expects to see.
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003971 */
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003972static int fault_around_bytes_set(void *data, u64 val)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003973{
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003974 if (val / PAGE_SIZE > PTRS_PER_PTE)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003975 return -EINVAL;
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003976 if (val > PAGE_SIZE)
3977 fault_around_bytes = rounddown_pow_of_two(val);
3978 else
3979 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003980 return 0;
3981}
Yevgen Pronenko0a1345f2017-07-10 15:47:17 -07003982DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003983 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003984
3985static int __init fault_around_debugfs(void)
3986{
Greg Kroah-Hartmand9f79792019-03-05 15:46:09 -08003987 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
3988 &fault_around_bytes_fops);
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003989 return 0;
3990}
3991late_initcall(fault_around_debugfs);
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003992#endif
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003993
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07003994/*
3995 * do_fault_around() tries to map a few pages around the fault address. The hope
3996 * is that the pages will be needed soon and this will lower the number of
3997 * faults to handle.
3998 *
3999 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4000 * not ready to be mapped: not up-to-date, locked, etc.
4001 *
4002 * This function is called with the page table lock taken. In the split ptlock
4003 * case the page table lock protects only those entries which belong to
4004 * the page table corresponding to the fault address.
4005 *
4006 * This function doesn't cross the VMA boundaries, in order to call map_pages()
4007 * only once.
4008 *
William Kucharskida391d62018-01-31 16:21:11 -08004009 * fault_around_bytes defines how many bytes we'll try to map.
4010 * do_fault_around() expects it to be set to a power of two less than or equal
4011 * to PTRS_PER_PTE.
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07004012 *
William Kucharskida391d62018-01-31 16:21:11 -08004013 * The virtual address of the area that we map is naturally aligned to
4014 * fault_around_bytes rounded down to the machine page size
4015 * (and therefore to page order). This way it's easier to guarantee
4016 * that we don't cross page table boundaries.
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07004017 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004018static vm_fault_t do_fault_around(struct vm_fault *vmf)
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004019{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004020 unsigned long address = vmf->address, nr_pages, mask;
Jan Kara0721ec82016-12-14 15:07:04 -08004021 pgoff_t start_pgoff = vmf->pgoff;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004022 pgoff_t end_pgoff;
Souptick Joarder2b740302018-08-23 17:01:36 -07004023 int off;
4024 vm_fault_t ret = 0;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004025
Jason Low4db0c3c2015-04-15 16:14:08 -07004026 nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
Kirill A. Shutemovaecd6f42014-08-06 16:08:05 -07004027 mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
4028
Jan Kara82b0f8c2016-12-14 15:06:58 -08004029 vmf->address = max(address & mask, vmf->vma->vm_start);
4030 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004031 start_pgoff -= off;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004032
4033 /*
William Kucharskida391d62018-01-31 16:21:11 -08004034 * end_pgoff is either the end of the page table, the end of
4035 * the vma, or nr_pages from start_pgoff, whichever is nearest.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004036 */
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004037 end_pgoff = start_pgoff -
Jan Kara82b0f8c2016-12-14 15:06:58 -08004038 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004039 PTRS_PER_PTE - 1;
Jan Kara82b0f8c2016-12-14 15:06:58 -08004040 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004041 start_pgoff + nr_pages - 1);
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004042
Jan Kara82b0f8c2016-12-14 15:06:58 -08004043 if (pmd_none(*vmf->pmd)) {
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08004044 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004045 if (!vmf->prealloc_pte)
Vegard Nossumc5f88bd2016-08-02 14:02:22 -07004046 goto out;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004047 smp_wmb(); /* See comment in __pte_alloc() */
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004048 }
4049
Jan Kara82b0f8c2016-12-14 15:06:58 -08004050 vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004051
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004052 /* Huge page is mapped? Page fault is solved */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004053 if (pmd_trans_huge(*vmf->pmd)) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004054 ret = VM_FAULT_NOPAGE;
4055 goto out;
4056 }
4057
4058 /* ->map_pages() hasn't done anything useful. Cold page cache? */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004059 if (!vmf->pte)
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004060 goto out;
4061
4062 /* check if the page fault is solved */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004063 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
4064 if (!pte_none(*vmf->pte))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004065 ret = VM_FAULT_NOPAGE;
Jan Kara82b0f8c2016-12-14 15:06:58 -08004066 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004067out:
Jan Kara82b0f8c2016-12-14 15:06:58 -08004068 vmf->address = address;
4069 vmf->pte = NULL;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004070 return ret;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004071}
4072
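For illustration only, a standalone sketch of the windowing arithmetic at the top of do_fault_around(), using assumed 4K-page/x86-64-style constants and hypothetical addresses; it mirrors the nr_pages, mask and off computation to show how the start of the mapped window is derived.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4K pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PTRS_PER_PTE	512			/* assumed x86-64 value */

int main(void)
{
	unsigned long fault_around_bytes = 65536;	/* the default above */
	unsigned long address  = 0x7f1234567000UL;	/* hypothetical fault address */
	unsigned long vm_start = 0x7f1234500000UL;	/* hypothetical vma->vm_start */

	unsigned long nr_pages = fault_around_bytes >> PAGE_SHIFT;
	unsigned long mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long start = address & mask;

	if (start < vm_start)
		start = vm_start;
	/* offset of the faulting page inside the window, capped at one page table */
	unsigned long off = ((address - start) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	printf("window: %lu pages from %#lx, faulting page is entry %lu\n",
	       nr_pages, start, off);
	return 0;
}
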
Souptick Joarder2b740302018-08-23 17:01:36 -07004073static vm_fault_t do_read_fault(struct vm_fault *vmf)
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004074{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004075 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07004076 vm_fault_t ret = 0;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004077
4078 /*
4079 * Let's call ->map_pages() first and use ->fault() as fallback
4080 * if the page at that offset is not ready to be mapped (cold cache or
4081 * something).
4082 */
Kirill A. Shutemov9b4bdd22015-02-10 14:09:51 -08004083 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
Jan Kara0721ec82016-12-14 15:07:04 -08004084 ret = do_fault_around(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004085 if (ret)
4086 return ret;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004087 }
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004088
Jan Kara936ca802016-12-14 15:07:10 -08004089 ret = __do_fault(vmf);
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004090 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4091 return ret;
4092
Jan Kara9118c0c2016-12-14 15:07:21 -08004093 ret |= finish_fault(vmf);
Jan Kara936ca802016-12-14 15:07:10 -08004094 unlock_page(vmf->page);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004095 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
Jan Kara936ca802016-12-14 15:07:10 -08004096 put_page(vmf->page);
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004097 return ret;
4098}
4099
Souptick Joarder2b740302018-08-23 17:01:36 -07004100static vm_fault_t do_cow_fault(struct vm_fault *vmf)
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004101{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004102 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07004103 vm_fault_t ret;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004104
4105 if (unlikely(anon_vma_prepare(vma)))
4106 return VM_FAULT_OOM;
4107
Jan Kara936ca802016-12-14 15:07:10 -08004108 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4109 if (!vmf->cow_page)
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004110 return VM_FAULT_OOM;
4111
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07004112 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
Jan Kara936ca802016-12-14 15:07:10 -08004113 put_page(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004114 return VM_FAULT_OOM;
4115 }
Johannes Weiner9d82c692020-06-03 16:02:04 -07004116 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004117
Jan Kara936ca802016-12-14 15:07:10 -08004118 ret = __do_fault(vmf);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004119 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4120 goto uncharge_out;
Jan Kara39170482016-12-14 15:07:18 -08004121 if (ret & VM_FAULT_DONE_COW)
4122 return ret;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004123
Jan Karab1aa8122016-12-14 15:07:24 -08004124 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
Jan Kara936ca802016-12-14 15:07:10 -08004125 __SetPageUptodate(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004126
Jan Kara9118c0c2016-12-14 15:07:21 -08004127 ret |= finish_fault(vmf);
Jan Karab1aa8122016-12-14 15:07:24 -08004128 unlock_page(vmf->page);
4129 put_page(vmf->page);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004130 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4131 goto uncharge_out;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004132 return ret;
4133uncharge_out:
Jan Kara936ca802016-12-14 15:07:10 -08004134 put_page(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004135 return ret;
4136}
4137
Souptick Joarder2b740302018-08-23 17:01:36 -07004138static vm_fault_t do_shared_fault(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004140 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07004141 vm_fault_t ret, tmp;
KAMEZAWA Hiroyuki1d65f862011-07-25 17:12:27 -07004142
Jan Kara936ca802016-12-14 15:07:10 -08004143 ret = __do_fault(vmf);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07004144 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004145 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146
4147 /*
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004148 * Check if the backing address space wants to know that the page is
4149 * about to become writable
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 */
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004151 if (vma->vm_ops->page_mkwrite) {
Jan Kara936ca802016-12-14 15:07:10 -08004152 unlock_page(vmf->page);
Jan Kara38b8cb72016-12-14 15:07:30 -08004153 tmp = do_page_mkwrite(vmf);
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004154 if (unlikely(!tmp ||
4155 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
Jan Kara936ca802016-12-14 15:07:10 -08004156 put_page(vmf->page);
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004157 return tmp;
4158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159 }
4160
Jan Kara9118c0c2016-12-14 15:07:21 -08004161 ret |= finish_fault(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004162 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4163 VM_FAULT_RETRY))) {
Jan Kara936ca802016-12-14 15:07:10 -08004164 unlock_page(vmf->page);
4165 put_page(vmf->page);
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004166 return ret;
Peter Zijlstrad08b3852006-09-25 23:30:57 -07004167 }
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004168
Johannes Weiner89b15332019-11-30 17:50:22 -08004169 ret |= fault_dirty_shared_page(vmf);
KAMEZAWA Hiroyuki1d65f862011-07-25 17:12:27 -07004170 return ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004171}
Nick Piggind00806b2007-07-19 01:46:57 -07004172
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004173/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004174 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004175 * but allow concurrent faults).
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004176 * The mmap_lock may have been released depending on flags and our
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004177 * return value. See filemap_fault() and __lock_page_or_retry().
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004178 * If mmap_lock is released, vma may become invalid (for example
Jan Stancekfc8efd22019-03-05 15:50:08 -08004179 * by other thread calling munmap()).
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004180 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004181static vm_fault_t do_fault(struct vm_fault *vmf)
Nick Piggin54cb8822007-07-19 01:46:59 -07004182{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004183 struct vm_area_struct *vma = vmf->vma;
Jan Stancekfc8efd22019-03-05 15:50:08 -08004184 struct mm_struct *vm_mm = vma->vm_mm;
Souptick Joarder2b740302018-08-23 17:01:36 -07004185 vm_fault_t ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004186
Aneesh Kumar K.Vff09d7e2018-10-26 15:09:01 -07004187 /*
4188 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4189 */
4190 if (!vma->vm_ops->fault) {
4191 /*
4192 * If we find a migration pmd entry or a none pmd entry, which
4193 * should never happen, return SIGBUS
4194 */
4195 if (unlikely(!pmd_present(*vmf->pmd)))
4196 ret = VM_FAULT_SIGBUS;
4197 else {
4198 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4199 vmf->pmd,
4200 vmf->address,
4201 &vmf->ptl);
4202 /*
4203 * Make sure this is not a temporary clearing of pte
4204 * by holding ptl and checking again. An R/M/W update
4205 * of the pte involves taking the ptl, clearing the pte so
4206 * that we don't have concurrent modification by hardware,
4207 * and then writing the update.
4208 */
4209 if (unlikely(pte_none(*vmf->pte)))
4210 ret = VM_FAULT_SIGBUS;
4211 else
4212 ret = VM_FAULT_NOPAGE;
4213
4214 pte_unmap_unlock(vmf->pte, vmf->ptl);
4215 }
4216 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08004217 ret = do_read_fault(vmf);
Laurent Dufour32507b62018-04-17 16:33:18 +02004218 else if (!(vmf->vma_flags & VM_SHARED))
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08004219 ret = do_cow_fault(vmf);
4220 else
4221 ret = do_shared_fault(vmf);
4222
4223 /* preallocated pagetable is unused: free it */
4224 if (vmf->prealloc_pte) {
Jan Stancekfc8efd22019-03-05 15:50:08 -08004225 pte_free(vm_mm, vmf->prealloc_pte);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08004226 vmf->prealloc_pte = NULL;
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08004227 }
4228 return ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004229}
4230
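For illustration only, a hedged userspace sketch of which handler a first-touch file fault reaches: a read goes through do_read_fault(), a first write to a MAP_PRIVATE file page through do_cow_fault(), and a write to a MAP_SHARED page through do_shared_fault(); a write to a page already mapped read-only would instead be handled by do_wp_page().

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/fault_demo", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	char *ro = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	char *cow = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	char *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (ro == MAP_FAILED || cow == MAP_FAILED || shared == MAP_FAILED)
		return 1;

	char c = *(volatile char *)ro;	/* read fault -> do_read_fault() */
	cow[0] = 'x';			/* first write, MAP_PRIVATE -> do_cow_fault() */
	shared[0] = 'y';		/* write, MAP_SHARED -> do_shared_fault() */

	printf("read byte %d, shared mapping now starts with '%c'\n", c, shared[0]);

	munmap(ro, 4096);
	munmap(cow, 4096);
	munmap(shared, 4096);
	close(fd);
	unlink("/tmp/fault_demo");
	return 0;
}
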
Rashika Kheriab19a9932014-04-03 14:48:02 -07004231static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
Rik van Riel04bb2f92013-10-07 11:29:36 +01004232 unsigned long addr, int page_nid,
4233 int *flags)
Mel Gorman9532fec2012-11-15 01:24:32 +00004234{
4235 get_page(page);
4236
4237 count_vm_numa_event(NUMA_HINT_FAULTS);
Rik van Riel04bb2f92013-10-07 11:29:36 +01004238 if (page_nid == numa_node_id()) {
Mel Gorman9532fec2012-11-15 01:24:32 +00004239 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
Rik van Riel04bb2f92013-10-07 11:29:36 +01004240 *flags |= TNF_FAULT_LOCAL;
4241 }
Mel Gorman9532fec2012-11-15 01:24:32 +00004242
4243 return mpol_misplaced(page, vma, addr);
4244}
4245
Souptick Joarder2b740302018-08-23 17:01:36 -07004246static vm_fault_t do_numa_page(struct vm_fault *vmf)
Mel Gormand10e63f2012-10-25 14:16:31 +02004247{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004248 struct vm_area_struct *vma = vmf->vma;
Mel Gorman4daae3b2012-11-02 11:33:45 +00004249 struct page *page = NULL;
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004250 int page_nid = NUMA_NO_NODE;
Peter Zijlstra90572892013-10-07 11:29:20 +01004251 int last_cpupid;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02004252 int target_nid;
Mel Gormanb8593bf2012-11-21 01:18:23 +00004253 bool migrated = false;
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004254 pte_t pte, old_pte;
Aneesh Kumar K.V288bc542017-02-24 14:59:16 -08004255 bool was_writable = pte_savedwrite(vmf->orig_pte);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004256 int flags = 0;
Mel Gormand10e63f2012-10-25 14:16:31 +02004257
4258 /*
Tobin C Harding166f61b2017-02-24 14:59:01 -08004259 * The "pte" at this point cannot be used safely without
4260 * validation through pte_unmap_same(). It's of NUMA type but
4261 * the pfn may be bogus if the read is not atomic.
Tobin C Harding166f61b2017-02-24 14:59:01 -08004262 */
Laurent Dufourb23ffc12018-04-17 16:33:11 +02004263 if (!pte_spinlock(vmf))
4264 return VM_FAULT_RETRY;
Aneesh Kumar K.Vcee216a2017-02-24 14:59:13 -08004265 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004266 pte_unmap_unlock(vmf->pte, vmf->ptl);
Mel Gorman4daae3b2012-11-02 11:33:45 +00004267 goto out;
4268 }
4269
Aneesh Kumar K.Vcee216a2017-02-24 14:59:13 -08004270 /*
4271 * Make it present again. Depending on how the arch implements
4272 * non-accessible ptes, some can allow access by kernel mode.
4273 */
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004274 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
Laurent Dufour32507b62018-04-17 16:33:18 +02004275 pte = pte_modify(old_pte, vmf->vma_page_prot);
Mel Gorman4d942462015-02-12 14:58:28 -08004276 pte = pte_mkyoung(pte);
Mel Gormanb191f9b2015-03-25 15:55:40 -07004277 if (was_writable)
4278 pte = pte_mkwrite(pte);
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004279 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004280 update_mmu_cache(vma, vmf->address, vmf->pte);
Mel Gormand10e63f2012-10-25 14:16:31 +02004281
Jan Kara82b0f8c2016-12-14 15:06:58 -08004282 page = vm_normal_page(vma, vmf->address, pte);
Mel Gormand10e63f2012-10-25 14:16:31 +02004283 if (!page) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004284 pte_unmap_unlock(vmf->pte, vmf->ptl);
Mel Gormand10e63f2012-10-25 14:16:31 +02004285 return 0;
4286 }
4287
Kirill A. Shutemove81c4802016-01-15 16:53:49 -08004288 /* TODO: handle PTE-mapped THP */
4289 if (PageCompound(page)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004290 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemove81c4802016-01-15 16:53:49 -08004291 return 0;
4292 }
4293
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004294 /*
Mel Gormanbea66fb2015-03-25 15:55:37 -07004295 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4296 * much anyway since they can be in shared cache state. This misses
4297 * the case where a mapping is writable but the process never writes
4298 * to it, yet pte_write gets cleared during protection updates and
4299 * pte_dirty has unpredictable behaviour between PTE scan updates,
4300 * background writeback, dirty balancing and application behaviour.
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004301 */
Rik van Rield59dc7b2016-09-08 21:30:53 -04004302 if (!pte_write(pte))
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004303 flags |= TNF_NO_GROUP;
4304
Rik van Rieldabe1d92013-10-07 11:29:34 +01004305 /*
4306 * Flag if the page is shared between multiple address spaces. This
4307 * is later used when determining whether to group tasks together.
4308 */
Laurent Dufour32507b62018-04-17 16:33:18 +02004309 if (page_mapcount(page) > 1 && (vmf->vma_flags & VM_SHARED))
Rik van Rieldabe1d92013-10-07 11:29:34 +01004310 flags |= TNF_SHARED;
4311
Peter Zijlstra90572892013-10-07 11:29:20 +01004312 last_cpupid = page_cpupid_last(page);
Mel Gorman8191acb2013-10-07 11:28:45 +01004313 page_nid = page_to_nid(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004314 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004315 &flags);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004316 pte_unmap_unlock(vmf->pte, vmf->ptl);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004317 if (target_nid == NUMA_NO_NODE) {
Mel Gorman4daae3b2012-11-02 11:33:45 +00004318 put_page(page);
4319 goto out;
4320 }
4321
4322 /* Migrate to the requested node */
Mel Gorman1bc115d2013-10-07 11:29:05 +01004323 migrated = migrate_misplaced_page(page, vma, target_nid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004324 if (migrated) {
Mel Gorman8191acb2013-10-07 11:28:45 +01004325 page_nid = target_nid;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004326 flags |= TNF_MIGRATED;
Mel Gorman074c2382015-03-25 15:55:42 -07004327 } else
4328 flags |= TNF_MIGRATE_FAIL;
Mel Gorman4daae3b2012-11-02 11:33:45 +00004329
4330out:
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004331 if (page_nid != NUMA_NO_NODE)
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004332 task_numa_fault(last_cpupid, page_nid, 1, flags);
Mel Gormand10e63f2012-10-25 14:16:31 +02004333 return 0;
4334}
4335
Souptick Joarder2b740302018-08-23 17:01:36 -07004336static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004337{
Dave Jiangf4200392017-02-22 15:40:06 -08004338 if (vma_is_anonymous(vmf->vma))
Jan Kara82b0f8c2016-12-14 15:06:58 -08004339 return do_huge_pmd_anonymous_page(vmf);
Dave Jianga2d58162017-02-24 14:56:59 -08004340 if (vmf->vma->vm_ops->huge_fault)
Dave Jiangc791ace2017-02-24 14:57:08 -08004341 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004342 return VM_FAULT_FALLBACK;
4343}
4344
Geert Uytterhoeven183f24a2017-12-14 15:32:52 -08004345/* `inline' is required to avoid gcc 4.1.2 build error */
Souptick Joarder2b740302018-08-23 17:01:36 -07004346static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004347{
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004348 if (vma_is_anonymous(vmf->vma)) {
Peter Xu292924b2020-04-06 20:05:49 -07004349 if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004350 return handle_userfault(vmf, VM_UFFD_WP);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004351 return do_huge_pmd_wp_page(vmf, orig_pmd);
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004352 }
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004353 if (vmf->vma->vm_ops->huge_fault) {
4354 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
Kirill A. Shutemovaf9e4d52016-07-26 15:25:40 -07004355
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004356 if (!(ret & VM_FAULT_FALLBACK))
4357 return ret;
4358 }
4359
4360 /* COW or write-notify handled on pte level: split pmd. */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004361 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
Kirill A. Shutemovaf9e4d52016-07-26 15:25:40 -07004362
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004363 return VM_FAULT_FALLBACK;
4364}
4365
Souptick Joarder2b740302018-08-23 17:01:36 -07004366static vm_fault_t create_huge_pud(struct vm_fault *vmf)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004367{
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004368#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4369 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004370 /* No support for anonymous transparent PUD pages yet */
4371 if (vma_is_anonymous(vmf->vma))
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004372 goto split;
4373 if (vmf->vma->vm_ops->huge_fault) {
4374 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4375
4376 if (!(ret & VM_FAULT_FALLBACK))
4377 return ret;
4378 }
4379split:
4380 /* COW or write-notify not handled on PUD level: split pud. */
4381 __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004382#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4383 return VM_FAULT_FALLBACK;
4384}
4385
Souptick Joarder2b740302018-08-23 17:01:36 -07004386static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004387{
4388#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4389 /* No support for anonymous transparent PUD pages yet */
4390 if (vma_is_anonymous(vmf->vma))
4391 return VM_FAULT_FALLBACK;
4392 if (vmf->vma->vm_ops->huge_fault)
Dave Jiangc791ace2017-02-24 14:57:08 -08004393 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004394#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4395 return VM_FAULT_FALLBACK;
4396}
4397
Linus Torvalds1da177e2005-04-16 15:20:36 -07004398/*
4399 * These routines also need to handle stuff like marking pages dirty
4400 * and/or accessed for architectures that don't do it in hardware (most
4401 * RISC architectures). The early dirtying is also good on the i386.
4402 *
4403 * There is also a hook called "update_mmu_cache()" that architectures
4404 * with external mmu caches can use to update those (i.e. the Sparc or
4405 * PowerPC hashed page tables that act as extended TLBs).
4406 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004407 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004408 * concurrent faults).
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004409 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004410 * The mmap_lock may have been released depending on flags and our return value.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004411 * See filemap_fault() and __lock_page_or_retry().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004413static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414{
4415 pte_t entry;
4416
Jan Kara82b0f8c2016-12-14 15:06:58 -08004417 if (unlikely(pmd_none(*vmf->pmd))) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004418 /*
4419 * Leave __pte_alloc() until later: because vm_ops->fault may
4420 * want to allocate a huge page, and if we expose the page table
4421 * for an instant, it will be difficult to retract from
4422 * concurrent faults and from rmap lookups.
4423 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004424 vmf->pte = NULL;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004425 } else {
4426 /* See comment in pte_alloc_one_map() */
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07004427 if (pmd_devmap_trans_unstable(vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004428 return 0;
4429 /*
4430 * A regular pmd is established and it can't morph into a huge
4431 * pmd from under us anymore at this point because we hold the
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004432 * mmap_lock read mode and khugepaged takes it in write mode.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004433 * So now it's safe to run pte_offset_map().
4434 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004435 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
Jan Kara29943022016-12-14 15:07:16 -08004436 vmf->orig_pte = *vmf->pte;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004437
4438 /*
4439 * some architectures can have larger ptes than wordsize,
4440 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
Paul E. McKenneyb03a0fe2017-10-23 14:07:25 -07004441 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4442 * accesses. The code below just needs a consistent view
4443 * for the ifs and we later double check anyway with the
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004444 * ptl lock held. So here a barrier will do.
4445 */
4446 barrier();
Jan Kara29943022016-12-14 15:07:16 -08004447 if (pte_none(vmf->orig_pte)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004448 pte_unmap(vmf->pte);
4449 vmf->pte = NULL;
Hugh Dickins65500d22005-10-29 18:15:59 -07004450 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451 }
4452
Jan Kara82b0f8c2016-12-14 15:06:58 -08004453 if (!vmf->pte) {
4454 if (vma_is_anonymous(vmf->vma))
4455 return do_anonymous_page(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004456 else
Jan Kara82b0f8c2016-12-14 15:06:58 -08004457 return do_fault(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004458 }
4459
Jan Kara29943022016-12-14 15:07:16 -08004460 if (!pte_present(vmf->orig_pte))
4461 return do_swap_page(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004462
Jan Kara29943022016-12-14 15:07:16 -08004463 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4464 return do_numa_page(vmf);
Mel Gormand10e63f2012-10-25 14:16:31 +02004465
Laurent Dufourb23ffc12018-04-17 16:33:11 +02004466 if (!pte_spinlock(vmf))
4467 return VM_FAULT_RETRY;
Jan Kara29943022016-12-14 15:07:16 -08004468 entry = vmf->orig_pte;
Bibo Mao7df67692020-05-27 10:25:18 +08004469 if (unlikely(!pte_same(*vmf->pte, entry))) {
4470 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07004471 goto unlock;
Bibo Mao7df67692020-05-27 10:25:18 +08004472 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08004473 if (vmf->flags & FAULT_FLAG_WRITE) {
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004474 if (!pte_write(entry))
Jan Kara29943022016-12-14 15:07:16 -08004475 return do_wp_page(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 entry = pte_mkdirty(entry);
4477 }
4478 entry = pte_mkyoung(entry);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004479 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4480 vmf->flags & FAULT_FLAG_WRITE)) {
4481 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004482 } else {
Yang Shib7333b52020-08-14 21:30:41 -07004483 /* Skip spurious TLB flush for retried page fault */
4484 if (vmf->flags & FAULT_FLAG_TRIED)
4485 goto unlock;
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004486 /*
4487 * This is needed only for protection faults but the arch code
4488 * is not yet telling us if this is a protection fault or not.
4489 * This still avoids useless tlb flushes for .text page faults
4490 * with threads.
4491 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004492 if (vmf->flags & FAULT_FLAG_WRITE)
4493 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004494 }
Hugh Dickins8f4e2102005-10-29 18:16:26 -07004495unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08004496 pte_unmap_unlock(vmf->pte, vmf->ptl);
Nick Piggin83c54072007-07-19 01:47:05 -07004497 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498}
4499
4500/*
 4501 * By the time we get here, we already hold the mmap_lock.
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004502 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004503 * The mmap_lock may have been released depending on flags and our
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004504 * return value. See filemap_fault() and __lock_page_or_retry().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004506static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4507 unsigned long address, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004509 struct vm_fault vmf = {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004510 .vma = vma,
Jan Kara1a29d852016-12-14 15:07:01 -08004511 .address = address & PAGE_MASK,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004512 .flags = flags,
Jan Kara0721ec82016-12-14 15:07:04 -08004513 .pgoff = linear_page_index(vma, address),
Jan Kara667240e2016-12-14 15:07:07 -08004514 .gfp_mask = __get_fault_gfp_mask(vma),
Laurent Dufour32507b62018-04-17 16:33:18 +02004515 .vma_flags = vma->vm_flags,
4516 .vma_page_prot = vma->vm_page_prot,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004517 };
Anshuman Khandualfde26be2017-09-08 16:12:45 -07004518 unsigned int dirty = flags & FAULT_FLAG_WRITE;
Kirill A. Shutemovdcddffd2016-07-26 15:25:18 -07004519 struct mm_struct *mm = vma->vm_mm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 pgd_t *pgd;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004521 p4d_t *p4d;
Souptick Joarder2b740302018-08-23 17:01:36 -07004522 vm_fault_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524 pgd = pgd_offset(mm, address);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004525 p4d = p4d_alloc(mm, pgd, address);
4526 if (!p4d)
4527 return VM_FAULT_OOM;
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004528
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004529 vmf.pud = pud_alloc(mm, p4d, address);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004530 if (!vmf.pud)
Hugh Dickinsc74df322005-10-29 18:16:23 -07004531 return VM_FAULT_OOM;
Thomas Hellstrom625110b2019-11-30 17:51:32 -08004532retry_pud:
Michal Hocko7635d9c2018-12-28 00:38:21 -08004533 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004534 ret = create_huge_pud(&vmf);
4535 if (!(ret & VM_FAULT_FALLBACK))
4536 return ret;
4537 } else {
4538 pud_t orig_pud = *vmf.pud;
4539
4540 barrier();
4541 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004542
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004543 /* NUMA case for anonymous PUDs would go here */
4544
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004545 if (dirty && !pud_write(orig_pud)) {
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004546 ret = wp_huge_pud(&vmf, orig_pud);
4547 if (!(ret & VM_FAULT_FALLBACK))
4548 return ret;
4549 } else {
4550 huge_pud_set_accessed(&vmf, orig_pud);
4551 return 0;
4552 }
4553 }
4554 }
4555
4556 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004557 if (!vmf.pmd)
Hugh Dickinsc74df322005-10-29 18:16:23 -07004558 return VM_FAULT_OOM;
Thomas Hellstrom625110b2019-11-30 17:51:32 -08004559
4560 /* Huge pud page fault raced with pmd_alloc? */
4561 if (pud_trans_unstable(vmf.pud))
4562 goto retry_pud;
4563
Michal Hocko7635d9c2018-12-28 00:38:21 -08004564 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
Dave Jianga2d58162017-02-24 14:56:59 -08004565 ret = create_huge_pmd(&vmf);
Kirill A. Shutemovc0292552013-09-12 15:14:05 -07004566 if (!(ret & VM_FAULT_FALLBACK))
4567 return ret;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08004568 } else {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004569 pmd_t orig_pmd = *vmf.pmd;
David Rientjes1f1d06c2012-05-29 15:06:23 -07004570
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08004571 barrier();
Zi Yan84c3fc42017-09-08 16:11:01 -07004572 if (unlikely(is_swap_pmd(orig_pmd))) {
4573 VM_BUG_ON(thp_migration_supported() &&
4574 !is_pmd_migration_entry(orig_pmd));
4575 if (is_pmd_migration_entry(orig_pmd))
4576 pmd_migration_entry_wait(mm, vmf.pmd);
4577 return 0;
4578 }
Dan Williams5c7fb562016-01-15 16:56:52 -08004579 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
Lorenzo Stoakes38e08852016-09-11 23:54:25 +01004580 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
Jan Kara82b0f8c2016-12-14 15:06:58 -08004581 return do_huge_pmd_numa_page(&vmf, orig_pmd);
Mel Gormand10e63f2012-10-25 14:16:31 +02004582
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004583 if (dirty && !pmd_write(orig_pmd)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004584 ret = wp_huge_pmd(&vmf, orig_pmd);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08004585 if (!(ret & VM_FAULT_FALLBACK))
4586 return ret;
Will Deacona1dd4502012-12-11 16:01:27 -08004587 } else {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004588 huge_pmd_set_accessed(&vmf, orig_pmd);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08004589 return 0;
David Rientjes1f1d06c2012-05-29 15:06:23 -07004590 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08004591 }
4592 }
4593
Jan Kara82b0f8c2016-12-14 15:06:58 -08004594 return handle_pte_fault(&vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004595}
4596
Peter Xubce617e2020-08-11 18:37:44 -07004597/**
 4598 * mm_account_fault - Do page fault accounting
4599 *
 4600 * @regs: the pt_regs struct pointer. When set to NULL, perf event counter
 4601 *        accounting is skipped, but we still do the per-task accounting
 4602 *        for the task that triggered this page fault.
4603 * @address: the faulted address.
4604 * @flags: the fault flags.
4605 * @ret: the fault retcode.
4606 *
 4607 * This takes care of most of the page fault accounting, including the
 4608 * PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter updates. Note, however,
 4609 * that PERF_COUNT_SW_PAGE_FAULTS itself should still be handled by the
 4610 * per-arch page fault handlers at page fault entry.
4611 */
4612static inline void mm_account_fault(struct pt_regs *regs,
4613 unsigned long address, unsigned int flags,
4614 vm_fault_t ret)
4615{
4616 bool major;
4617
4618 /*
4619 * We don't do accounting for some specific faults:
4620 *
4621 * - Unsuccessful faults (e.g. when the address wasn't valid). That
4622 * includes arch_vma_access_permitted() failing before reaching here.
4623 * So this is not a "this many hardware page faults" counter. We
4624 * should use the hw profiling for that.
4625 *
4626 * - Incomplete faults (VM_FAULT_RETRY). They will only be counted
4627 * once they're completed.
4628 */
4629 if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4630 return;
4631
4632 /*
4633 * We define the fault as a major fault when the final successful fault
4634 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4635 * handle it immediately previously).
4636 */
4637 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4638
Peter Xua2beb5f2020-08-11 18:38:57 -07004639 if (major)
4640 current->maj_flt++;
4641 else
4642 current->min_flt++;
4643
Peter Xubce617e2020-08-11 18:37:44 -07004644 /*
Peter Xua2beb5f2020-08-11 18:38:57 -07004645	 * If the fault was done on behalf of GUP, regs will be NULL. In that
 4646	 * case we only do the per-thread fault accounting for the task that
 4647	 * triggered the fault above, and we skip the perf event updates.
Peter Xubce617e2020-08-11 18:37:44 -07004648 */
4649 if (!regs)
4650 return;
4651
Peter Xua2beb5f2020-08-11 18:38:57 -07004652 if (major)
Peter Xubce617e2020-08-11 18:37:44 -07004653 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
Peter Xua2beb5f2020-08-11 18:38:57 -07004654 else
Peter Xubce617e2020-08-11 18:37:44 -07004655 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
Peter Xubce617e2020-08-11 18:37:44 -07004656}
4657
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004658/*
 4659 * By the time we get here, we already hold the mmap_lock.
4660 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004661 * The mmap_lock may have been released depending on flags and our
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004662 * return value. See filemap_fault() and __lock_page_or_retry().
4663 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004664vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
Peter Xubce617e2020-08-11 18:37:44 -07004665 unsigned int flags, struct pt_regs *regs)
Johannes Weiner519e5242013-09-12 15:13:42 -07004666{
Souptick Joarder2b740302018-08-23 17:01:36 -07004667 vm_fault_t ret;
Johannes Weiner519e5242013-09-12 15:13:42 -07004668
4669 __set_current_state(TASK_RUNNING);
4670
4671 count_vm_event(PGFAULT);
Roman Gushchin22621852017-07-06 15:40:25 -07004672 count_memcg_event_mm(vma->vm_mm, PGFAULT);
Johannes Weiner519e5242013-09-12 15:13:42 -07004673
4674 /* do counter updates before entering really critical section. */
4675 check_sync_rss_stat(current);
4676
Laurent Dufourde0c7992017-09-08 16:13:12 -07004677 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4678 flags & FAULT_FLAG_INSTRUCTION,
4679 flags & FAULT_FLAG_REMOTE))
4680 return VM_FAULT_SIGSEGV;
4681
Johannes Weiner519e5242013-09-12 15:13:42 -07004682 /*
4683 * Enable the memcg OOM handling for faults triggered in user
4684 * space. Kernel faults are handled more gracefully.
4685 */
4686 if (flags & FAULT_FLAG_USER)
Michal Hocko29ef6802018-08-17 15:47:11 -07004687 mem_cgroup_enter_user_fault();
Johannes Weiner519e5242013-09-12 15:13:42 -07004688
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004689 if (unlikely(is_vm_hugetlb_page(vma)))
4690 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4691 else
4692 ret = __handle_mm_fault(vma, address, flags);
Johannes Weiner519e5242013-09-12 15:13:42 -07004693
Johannes Weiner49426422013-10-16 13:46:59 -07004694 if (flags & FAULT_FLAG_USER) {
Michal Hocko29ef6802018-08-17 15:47:11 -07004695 mem_cgroup_exit_user_fault();
Tobin C Harding166f61b2017-02-24 14:59:01 -08004696 /*
4697 * The task may have entered a memcg OOM situation but
4698 * if the allocation error was handled gracefully (no
4699 * VM_FAULT_OOM), there is no need to kill anything.
4700 * Just clean up the OOM state peacefully.
4701 */
4702 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4703 mem_cgroup_oom_synchronize(false);
Johannes Weiner49426422013-10-16 13:46:59 -07004704 }
Johannes Weiner3812c8c2013-09-12 15:13:44 -07004705
Peter Xubce617e2020-08-11 18:37:44 -07004706 mm_account_fault(regs, address, flags, ret);
4707
Johannes Weiner519e5242013-09-12 15:13:42 -07004708 return ret;
4709}
Jesse Barnese1d6d012014-12-12 16:55:27 -08004710EXPORT_SYMBOL_GPL(handle_mm_fault);
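
/*
 * Illustrative sketch, not part of memory.c: roughly how an architecture's
 * page fault handler drives handle_mm_fault().  The function name below is
 * hypothetical and the error/signal handling is heavily simplified compared
 * to real arch code (see e.g. arch/x86/mm/fault.c).
 */
static void example_arch_page_fault(struct pt_regs *regs,
				    unsigned long address, bool write)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		/* real code would try expand_stack() and/or raise SIGSEGV */
		mmap_read_unlock(mm);
		return;
	}

	/* May release the mmap_lock, see the comment above handle_mm_fault() */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault & VM_FAULT_RETRY) {
		/* The fault path has already dropped the mmap_lock for us */
		if (fatal_signal_pending(current))
			return;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(mm);

	if (fault & VM_FAULT_ERROR) {
		/* real code would turn this into OOM handling or SIGBUS/SIGSEGV */
		return;
	}
}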
Johannes Weiner519e5242013-09-12 15:13:42 -07004711
Kirill A. Shutemov90eceff2017-03-09 17:24:08 +03004712#ifndef __PAGETABLE_P4D_FOLDED
4713/*
4714 * Allocate p4d page table.
4715 * We've already handled the fast-path in-line.
4716 */
4717int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4718{
4719 p4d_t *new = p4d_alloc_one(mm, address);
4720 if (!new)
4721 return -ENOMEM;
4722
4723 smp_wmb(); /* See comment in __pte_alloc */
4724
4725 spin_lock(&mm->page_table_lock);
4726 if (pgd_present(*pgd)) /* Another has populated it */
4727 p4d_free(mm, new);
4728 else
4729 pgd_populate(mm, pgd, new);
4730 spin_unlock(&mm->page_table_lock);
4731 return 0;
4732}
4733#endif /* __PAGETABLE_P4D_FOLDED */
4734
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735#ifndef __PAGETABLE_PUD_FOLDED
4736/*
4737 * Allocate page upper directory.
Hugh Dickins872fec12005-10-29 18:16:21 -07004738 * We've already handled the fast-path in-line.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739 */
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004740int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741{
Hugh Dickinsc74df322005-10-29 18:16:23 -07004742 pud_t *new = pud_alloc_one(mm, address);
4743 if (!new)
Hugh Dickins1bb36302005-10-29 18:16:22 -07004744 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745
Nick Piggin362a61a2008-05-14 06:37:36 +02004746 smp_wmb(); /* See comment in __pte_alloc */
4747
Hugh Dickins872fec12005-10-29 18:16:21 -07004748 spin_lock(&mm->page_table_lock);
Kirill A. Shutemovb4e98d92017-11-15 17:35:33 -08004749 if (!p4d_present(*p4d)) {
4750 mm_inc_nr_puds(mm);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004751 p4d_populate(mm, p4d, new);
Kirill A. Shutemovb4e98d92017-11-15 17:35:33 -08004752 } else /* Another has populated it */
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004753 pud_free(mm, new);
Hugh Dickinsc74df322005-10-29 18:16:23 -07004754 spin_unlock(&mm->page_table_lock);
Hugh Dickins1bb36302005-10-29 18:16:22 -07004755 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756}
4757#endif /* __PAGETABLE_PUD_FOLDED */
4758
4759#ifndef __PAGETABLE_PMD_FOLDED
4760/*
4761 * Allocate page middle directory.
Hugh Dickins872fec12005-10-29 18:16:21 -07004762 * We've already handled the fast-path in-line.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763 */
Hugh Dickins1bb36302005-10-29 18:16:22 -07004764int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765{
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004766 spinlock_t *ptl;
Hugh Dickinsc74df322005-10-29 18:16:23 -07004767 pmd_t *new = pmd_alloc_one(mm, address);
4768 if (!new)
Hugh Dickins1bb36302005-10-29 18:16:22 -07004769 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770
Nick Piggin362a61a2008-05-14 06:37:36 +02004771 smp_wmb(); /* See comment in __pte_alloc */
4772
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004773 ptl = pud_lock(mm, pud);
Kirill A. Shutemovdc6c9a32015-02-11 15:26:50 -08004774 if (!pud_present(*pud)) {
4775 mm_inc_nr_pmds(mm);
Hugh Dickins1bb36302005-10-29 18:16:22 -07004776 pud_populate(mm, pud, new);
Kirill A. Shutemovdc6c9a32015-02-11 15:26:50 -08004777 } else /* Another has populated it */
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -08004778 pmd_free(mm, new);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004779 spin_unlock(ptl);
Hugh Dickins1bb36302005-10-29 18:16:22 -07004780 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781}
4782#endif /* __PAGETABLE_PMD_FOLDED */
4783
Ross Zwisler09796392017-01-10 16:57:21 -08004784static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004785 struct mmu_notifier_range *range,
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004786 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004787{
4788 pgd_t *pgd;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004789 p4d_t *p4d;
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004790 pud_t *pud;
4791 pmd_t *pmd;
4792 pte_t *ptep;
4793
4794 pgd = pgd_offset(mm, address);
4795 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4796 goto out;
4797
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004798 p4d = p4d_offset(pgd, address);
4799 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4800 goto out;
4801
4802 pud = pud_offset(p4d, address);
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004803 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4804 goto out;
4805
4806 pmd = pmd_offset(pud, address);
Andrea Arcangelif66055ab2011-01-13 15:46:54 -08004807 VM_BUG_ON(pmd_trans_huge(*pmd));
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004808
Ross Zwisler09796392017-01-10 16:57:21 -08004809 if (pmd_huge(*pmd)) {
4810 if (!pmdpp)
4811 goto out;
4812
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004813 if (range) {
Jérôme Glisse7269f992019-05-13 17:20:53 -07004814 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07004815 NULL, mm, address & PMD_MASK,
4816 (address & PMD_MASK) + PMD_SIZE);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004817 mmu_notifier_invalidate_range_start(range);
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004818 }
Ross Zwisler09796392017-01-10 16:57:21 -08004819 *ptlp = pmd_lock(mm, pmd);
4820 if (pmd_huge(*pmd)) {
4821 *pmdpp = pmd;
4822 return 0;
4823 }
4824 spin_unlock(*ptlp);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004825 if (range)
4826 mmu_notifier_invalidate_range_end(range);
Ross Zwisler09796392017-01-10 16:57:21 -08004827 }
4828
4829 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004830 goto out;
4831
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004832 if (range) {
Jérôme Glisse7269f992019-05-13 17:20:53 -07004833 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07004834 address & PAGE_MASK,
4835 (address & PAGE_MASK) + PAGE_SIZE);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004836 mmu_notifier_invalidate_range_start(range);
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004837 }
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004838 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004839 if (!pte_present(*ptep))
4840 goto unlock;
4841 *ptepp = ptep;
4842 return 0;
4843unlock:
4844 pte_unmap_unlock(ptep, *ptlp);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004845 if (range)
4846 mmu_notifier_invalidate_range_end(range);
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004847out:
4848 return -EINVAL;
4849}
4850
Ross Zwislerf729c8c2017-01-10 16:57:24 -08004851static inline int follow_pte(struct mm_struct *mm, unsigned long address,
4852 pte_t **ptepp, spinlock_t **ptlp)
Namhyung Kim1b36ba82010-10-26 14:22:00 -07004853{
4854 int res;
4855
4856 /* (void) is needed to make gcc happy */
4857 (void) __cond_lock(*ptlp,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004858 !(res = __follow_pte_pmd(mm, address, NULL,
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004859 ptepp, NULL, ptlp)));
Namhyung Kim1b36ba82010-10-26 14:22:00 -07004860 return res;
4861}
4862
Ross Zwisler09796392017-01-10 16:57:21 -08004863int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004864 struct mmu_notifier_range *range,
4865 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
Ross Zwisler09796392017-01-10 16:57:21 -08004866{
4867 int res;
4868
4869 /* (void) is needed to make gcc happy */
4870 (void) __cond_lock(*ptlp,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004871 !(res = __follow_pte_pmd(mm, address, range,
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004872 ptepp, pmdpp, ptlp)));
Ross Zwisler09796392017-01-10 16:57:21 -08004873 return res;
4874}
4875EXPORT_SYMBOL(follow_pte_pmd);
4876
Johannes Weiner3b6748e2009-06-16 15:32:35 -07004877/**
4878 * follow_pfn - look up PFN at a user virtual address
4879 * @vma: memory mapping
4880 * @address: user virtual address
4881 * @pfn: location to store found PFN
4882 *
4883 * Only IO mappings and raw PFN mappings are allowed.
4884 *
Mike Rapoporta862f682019-03-05 15:48:42 -08004885 * Return: zero and the pfn at @pfn on success, a negative errno otherwise.
Johannes Weiner3b6748e2009-06-16 15:32:35 -07004886 */
4887int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4888 unsigned long *pfn)
4889{
4890 int ret = -EINVAL;
4891 spinlock_t *ptl;
4892 pte_t *ptep;
4893
4894 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4895 return ret;
4896
4897 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
4898 if (ret)
4899 return ret;
4900 *pfn = pte_pfn(*ptep);
4901 pte_unmap_unlock(ptep, ptl);
4902 return 0;
4903}
4904EXPORT_SYMBOL(follow_pfn);
4905
Rik van Riel28b2ee22008-07-23 21:27:05 -07004906#ifdef CONFIG_HAVE_IOREMAP_PROT
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004907int follow_phys(struct vm_area_struct *vma,
4908 unsigned long address, unsigned int flags,
4909 unsigned long *prot, resource_size_t *phys)
Rik van Riel28b2ee22008-07-23 21:27:05 -07004910{
Johannes Weiner03668a42009-06-16 15:32:34 -07004911 int ret = -EINVAL;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004912 pte_t *ptep, pte;
4913 spinlock_t *ptl;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004914
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004915 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4916 goto out;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004917
Johannes Weiner03668a42009-06-16 15:32:34 -07004918 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004919 goto out;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004920 pte = *ptep;
Johannes Weiner03668a42009-06-16 15:32:34 -07004921
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004922 if ((flags & FOLL_WRITE) && !pte_write(pte))
Rik van Riel28b2ee22008-07-23 21:27:05 -07004923 goto unlock;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004924
4925 *prot = pgprot_val(pte_pgprot(pte));
Johannes Weiner03668a42009-06-16 15:32:34 -07004926 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004927
Johannes Weiner03668a42009-06-16 15:32:34 -07004928 ret = 0;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004929unlock:
4930 pte_unmap_unlock(ptep, ptl);
4931out:
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004932 return ret;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004933}
4934
4935int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
4936 void *buf, int len, int write)
4937{
4938 resource_size_t phys_addr;
4939 unsigned long prot = 0;
KOSAKI Motohiro2bc72732009-01-06 14:39:43 -08004940 void __iomem *maddr;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004941 int offset = addr & (PAGE_SIZE-1);
4942
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004943 if (follow_phys(vma, addr, write, &prot, &phys_addr))
Rik van Riel28b2ee22008-07-23 21:27:05 -07004944 return -EINVAL;
4945
Grazvydas Ignotas9cb12d72015-02-12 15:00:19 -08004946 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
jie@chenjie6@huwei.com24eee1e2018-08-10 17:23:06 -07004947 if (!maddr)
4948 return -ENOMEM;
4949
Rik van Riel28b2ee22008-07-23 21:27:05 -07004950 if (write)
4951 memcpy_toio(maddr + offset, buf, len);
4952 else
4953 memcpy_fromio(buf, maddr + offset, len);
4954 iounmap(maddr);
4955
4956 return len;
4957}
Uwe Kleine-König5a736332013-08-07 13:02:52 +02004958EXPORT_SYMBOL_GPL(generic_access_phys);
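
/*
 * Illustrative sketch, not part of memory.c: generic_access_phys() is meant
 * to be plugged in as the ->access() method of a VM_IO mapping's
 * vm_operations_struct (as /dev/mem does), so that access_process_vm() and
 * ptrace can read the mapped registers.  The ops structure below is
 * hypothetical.
 */
static const struct vm_operations_struct example_phys_vm_ops = {
	.access = generic_access_phys,
};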
Rik van Riel28b2ee22008-07-23 21:27:05 -07004959#endif
4960
David Howells0ec76a12006-09-27 01:50:15 -07004961/*
Stephen Wilson206cb632011-03-13 15:49:19 -04004962 * Access another process' address space as given in mm. If non-NULL, use the
4963 * given task for page fault accounting.
David Howells0ec76a12006-09-27 01:50:15 -07004964 */
Eric W. Biederman84d77d32016-11-22 12:06:50 -06004965int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
Lorenzo Stoakes442486e2016-10-13 01:20:18 +01004966 unsigned long addr, void *buf, int len, unsigned int gup_flags)
David Howells0ec76a12006-09-27 01:50:15 -07004967{
David Howells0ec76a12006-09-27 01:50:15 -07004968 struct vm_area_struct *vma;
David Howells0ec76a12006-09-27 01:50:15 -07004969 void *old_buf = buf;
Lorenzo Stoakes442486e2016-10-13 01:20:18 +01004970 int write = gup_flags & FOLL_WRITE;
David Howells0ec76a12006-09-27 01:50:15 -07004971
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07004972 if (mmap_read_lock_killable(mm))
Konstantin Khlebnikov1e426fe2019-07-11 21:00:07 -07004973 return 0;
4974
Simon Arlott183ff222007-10-20 01:27:18 +02004975 /* ignore errors, just check how much was successfully transferred */
David Howells0ec76a12006-09-27 01:50:15 -07004976 while (len) {
4977 int bytes, ret, offset;
4978 void *maddr;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004979 struct page *page = NULL;
David Howells0ec76a12006-09-27 01:50:15 -07004980
Peter Xu64019a22020-08-11 18:39:01 -07004981 ret = get_user_pages_remote(mm, addr, 1,
Lorenzo Stoakes5b56d492016-12-14 15:06:52 -08004982 gup_flags, &page, &vma, NULL);
Rik van Riel28b2ee22008-07-23 21:27:05 -07004983 if (ret <= 0) {
Rik van Rieldbffcd02014-08-06 16:08:12 -07004984#ifndef CONFIG_HAVE_IOREMAP_PROT
4985 break;
4986#else
Rik van Riel28b2ee22008-07-23 21:27:05 -07004987 /*
4988 * Check if this is a VM_IO | VM_PFNMAP VMA, which
4989 * we can access using slightly different code.
4990 */
Rik van Riel28b2ee22008-07-23 21:27:05 -07004991 vma = find_vma(mm, addr);
Michael Ellermanfe936df2011-04-14 15:22:10 -07004992 if (!vma || vma->vm_start > addr)
Rik van Riel28b2ee22008-07-23 21:27:05 -07004993 break;
4994 if (vma->vm_ops && vma->vm_ops->access)
4995 ret = vma->vm_ops->access(vma, addr, buf,
4996 len, write);
4997 if (ret <= 0)
Rik van Riel28b2ee22008-07-23 21:27:05 -07004998 break;
4999 bytes = ret;
Rik van Rieldbffcd02014-08-06 16:08:12 -07005000#endif
David Howells0ec76a12006-09-27 01:50:15 -07005001 } else {
Rik van Riel28b2ee22008-07-23 21:27:05 -07005002 bytes = len;
5003 offset = addr & (PAGE_SIZE-1);
5004 if (bytes > PAGE_SIZE-offset)
5005 bytes = PAGE_SIZE-offset;
5006
5007 maddr = kmap(page);
5008 if (write) {
5009 copy_to_user_page(vma, page, addr,
5010 maddr + offset, buf, bytes);
5011 set_page_dirty_lock(page);
5012 } else {
5013 copy_from_user_page(vma, page, addr,
5014 buf, maddr + offset, bytes);
5015 }
5016 kunmap(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005017 put_page(page);
David Howells0ec76a12006-09-27 01:50:15 -07005018 }
David Howells0ec76a12006-09-27 01:50:15 -07005019 len -= bytes;
5020 buf += bytes;
5021 addr += bytes;
5022 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07005023 mmap_read_unlock(mm);
David Howells0ec76a12006-09-27 01:50:15 -07005024
5025 return buf - old_buf;
5026}
Andi Kleen03252912008-01-30 13:33:18 +01005027
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04005028/**
Randy Dunlapae91dbf2011-03-26 13:27:01 -07005029 * access_remote_vm - access another process' address space
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04005030 * @mm: the mm_struct of the target address space
5031 * @addr: start address to access
5032 * @buf: source or destination buffer
5033 * @len: number of bytes to transfer
Lorenzo Stoakes6347e8d2016-10-13 01:20:19 +01005034 * @gup_flags: flags modifying lookup behaviour
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04005035 *
5036 * The caller must hold a reference on @mm.
Mike Rapoporta862f682019-03-05 15:48:42 -08005037 *
5038 * Return: number of bytes copied from source to destination.
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04005039 */
5040int access_remote_vm(struct mm_struct *mm, unsigned long addr,
Lorenzo Stoakes6347e8d2016-10-13 01:20:19 +01005041 void *buf, int len, unsigned int gup_flags)
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04005042{
Lorenzo Stoakes6347e8d2016-10-13 01:20:19 +01005043 return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04005044}
5045
Andi Kleen03252912008-01-30 13:33:18 +01005046/*
Stephen Wilson206cb632011-03-13 15:49:19 -04005047 * Access another process' address space.
 5048 * The source/target buffer must be in kernel space.
 5049 * Do not walk the page table directly; use get_user_pages().
5050 */
5051int access_process_vm(struct task_struct *tsk, unsigned long addr,
Lorenzo Stoakesf307ab62016-10-13 01:20:20 +01005052 void *buf, int len, unsigned int gup_flags)
Stephen Wilson206cb632011-03-13 15:49:19 -04005053{
5054 struct mm_struct *mm;
5055 int ret;
5056
5057 mm = get_task_mm(tsk);
5058 if (!mm)
5059 return 0;
5060
Lorenzo Stoakesf307ab62016-10-13 01:20:20 +01005061 ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
Lorenzo Stoakes442486e2016-10-13 01:20:18 +01005062
Stephen Wilson206cb632011-03-13 15:49:19 -04005063 mmput(mm);
5064
5065 return ret;
5066}
Catalin Marinasfcd35852016-11-01 14:43:25 -07005067EXPORT_SYMBOL_GPL(access_process_vm);
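
/*
 * Illustrative sketch, not part of memory.c: reading one word of another
 * task's memory the way ptrace(PTRACE_PEEKDATA) does.  The helper name is
 * hypothetical; FOLL_FORCE mirrors what the ptrace code passes so that
 * read-only mappings of the tracee can still be inspected.
 */
static int example_peek_word(struct task_struct *child, unsigned long addr,
			     unsigned long *val)
{
	int copied;

	copied = access_process_vm(child, addr, val, sizeof(*val), FOLL_FORCE);

	return copied == sizeof(*val) ? 0 : -EIO;
}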
Stephen Wilson206cb632011-03-13 15:49:19 -04005068
Andi Kleen03252912008-01-30 13:33:18 +01005069/*
5070 * Print the name of a VMA.
5071 */
5072void print_vma_addr(char *prefix, unsigned long ip)
5073{
5074 struct mm_struct *mm = current->mm;
5075 struct vm_area_struct *vma;
5076
Ingo Molnare8bff742008-02-13 20:21:06 +01005077 /*
Michal Hocko0a7f6822017-11-15 17:38:59 -08005078	 * We might be running in an atomic context, so we cannot sleep.
Ingo Molnare8bff742008-02-13 20:21:06 +01005079 */
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07005080 if (!mmap_read_trylock(mm))
Ingo Molnare8bff742008-02-13 20:21:06 +01005081 return;
5082
Andi Kleen03252912008-01-30 13:33:18 +01005083 vma = find_vma(mm, ip);
5084 if (vma && vma->vm_file) {
5085 struct file *f = vma->vm_file;
Michal Hocko0a7f6822017-11-15 17:38:59 -08005086 char *buf = (char *)__get_free_page(GFP_NOWAIT);
Andi Kleen03252912008-01-30 13:33:18 +01005087 if (buf) {
Andy Shevchenko2fbc57c2012-12-17 16:01:23 -08005088 char *p;
Andi Kleen03252912008-01-30 13:33:18 +01005089
Miklos Szeredi9bf39ab2015-06-19 10:29:13 +02005090 p = file_path(f, buf, PAGE_SIZE);
Andi Kleen03252912008-01-30 13:33:18 +01005091 if (IS_ERR(p))
5092 p = "?";
Andy Shevchenko2fbc57c2012-12-17 16:01:23 -08005093 printk("%s%s[%lx+%lx]", prefix, kbasename(p),
Andi Kleen03252912008-01-30 13:33:18 +01005094 vma->vm_start,
5095 vma->vm_end - vma->vm_start);
5096 free_page((unsigned long)buf);
5097 }
5098 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07005099 mmap_read_unlock(mm);
Andi Kleen03252912008-01-30 13:33:18 +01005100}
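
/*
 * Illustrative sketch, not part of memory.c: print_vma_addr() is typically
 * called from arch signal-reporting code.  The helper below is a simplified,
 * hypothetical version of the x86 show_signal_msg() output for a user-space
 * segfault.
 */
static void example_show_signal_msg(struct pt_regs *regs, unsigned long addr)
{
	pr_info("%s[%d]: segfault at %lx ip %px",
		current->comm, task_pid_nr(current), addr,
		(void *)instruction_pointer(regs));
	print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
	pr_cont("\n");
}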
Nick Piggin3ee1afa2008-09-10 13:37:17 +02005101
Michael S. Tsirkin662bbcb2013-05-26 17:32:23 +03005102#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
David Hildenbrand9ec23532015-05-11 17:52:07 +02005103void __might_fault(const char *file, int line)
Nick Piggin3ee1afa2008-09-10 13:37:17 +02005104{
Peter Zijlstra95156f02009-01-12 13:02:11 +01005105 /*
5106 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07005107	 * holding the mmap_lock. This is safe because kernel memory doesn't
Peter Zijlstra95156f02009-01-12 13:02:11 +01005108	 * get paged out, so we'll never actually fault, and the annotations
 5109	 * below would otherwise generate false positives.
5110 */
Al Virodb68ce12017-03-20 21:08:07 -04005111 if (uaccess_kernel())
Peter Zijlstra95156f02009-01-12 13:02:11 +01005112 return;
David Hildenbrand9ec23532015-05-11 17:52:07 +02005113 if (pagefault_disabled())
Michael S. Tsirkin662bbcb2013-05-26 17:32:23 +03005114 return;
David Hildenbrand9ec23532015-05-11 17:52:07 +02005115 __might_sleep(file, line, 0);
5116#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
Michael S. Tsirkin662bbcb2013-05-26 17:32:23 +03005117 if (current->mm)
Michel Lespinasseda1c55f2020-06-08 21:33:47 -07005118 might_lock_read(&current->mm->mmap_lock);
David Hildenbrand9ec23532015-05-11 17:52:07 +02005119#endif
Nick Piggin3ee1afa2008-09-10 13:37:17 +02005120}
David Hildenbrand9ec23532015-05-11 17:52:07 +02005121EXPORT_SYMBOL(__might_fault);
Nick Piggin3ee1afa2008-09-10 13:37:17 +02005122#endif
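
/*
 * Illustrative sketch, not part of memory.c: might_fault() (which expands
 * to the __might_fault() above when lock debugging is enabled) is placed at
 * the start of primitives that may sleep on a user-space fault, so that
 * CONFIG_DEBUG_ATOMIC_SLEEP catches callers in atomic context.  The helper
 * below is hypothetical.
 */
static int example_get_user_flag(const int __user *uptr, int *flag)
{
	might_fault();	/* warns if we hold a spinlock, run atomically, ... */

	return copy_from_user(flag, uptr, sizeof(*flag)) ? -EFAULT : 0;
}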
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005123
5124#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
Huang Yingc6ddfb62018-08-17 15:45:46 -07005125/*
5126 * Process all subpages of the specified huge page with the specified
5127 * operation. The target subpage will be processed last to keep its
5128 * cache lines hot.
5129 */
5130static inline void process_huge_page(
5131 unsigned long addr_hint, unsigned int pages_per_huge_page,
5132 void (*process_subpage)(unsigned long addr, int idx, void *arg),
5133 void *arg)
5134{
5135 int i, n, base, l;
5136 unsigned long addr = addr_hint &
5137 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5138
5139 /* Process target subpage last to keep its cache lines hot */
5140 might_sleep();
5141 n = (addr_hint - addr) / PAGE_SIZE;
5142 if (2 * n <= pages_per_huge_page) {
5143 /* If target subpage in first half of huge page */
5144 base = 0;
5145 l = n;
5146 /* Process subpages at the end of huge page */
5147 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5148 cond_resched();
5149 process_subpage(addr + i * PAGE_SIZE, i, arg);
5150 }
5151 } else {
5152 /* If target subpage in second half of huge page */
5153 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5154 l = pages_per_huge_page - n;
 5155		/* Process subpages at the beginning of huge page */
5156 for (i = 0; i < base; i++) {
5157 cond_resched();
5158 process_subpage(addr + i * PAGE_SIZE, i, arg);
5159 }
5160 }
5161 /*
5162 * Process remaining subpages in left-right-left-right pattern
5163 * towards the target subpage
5164 */
5165 for (i = 0; i < l; i++) {
5166 int left_idx = base + i;
5167 int right_idx = base + 2 * l - 1 - i;
5168
5169 cond_resched();
5170 process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5171 cond_resched();
5172 process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5173 }
5174}
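
/*
 * Worked example (illustrative only): with pages_per_huge_page == 8 and
 * addr_hint pointing into subpage 2, the loops above touch the subpages in
 * the order 7, 6, 5, 4, 0, 3, 1, 2: the subpages furthest from the target
 * first, then alternating left/right while converging on the target, so
 * that the target's cache lines are the hottest when the fault returns.
 */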
5175
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005176static void clear_gigantic_page(struct page *page,
5177 unsigned long addr,
5178 unsigned int pages_per_huge_page)
5179{
5180 int i;
5181 struct page *p = page;
5182
5183 might_sleep();
5184 for (i = 0; i < pages_per_huge_page;
5185 i++, p = mem_map_next(p, page, i)) {
5186 cond_resched();
5187 clear_user_highpage(p, addr + i * PAGE_SIZE);
5188 }
5189}
Huang Yingc6ddfb62018-08-17 15:45:46 -07005190
5191static void clear_subpage(unsigned long addr, int idx, void *arg)
5192{
5193 struct page *page = arg;
5194
5195 clear_user_highpage(page + idx, addr);
5196}
5197
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005198void clear_huge_page(struct page *page,
Huang Yingc79b57e2017-09-06 16:25:04 -07005199 unsigned long addr_hint, unsigned int pages_per_huge_page)
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005200{
Huang Yingc79b57e2017-09-06 16:25:04 -07005201 unsigned long addr = addr_hint &
5202 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005203
5204 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5205 clear_gigantic_page(page, addr, pages_per_huge_page);
5206 return;
5207 }
5208
Huang Yingc6ddfb62018-08-17 15:45:46 -07005209 process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005210}
5211
5212static void copy_user_gigantic_page(struct page *dst, struct page *src,
5213 unsigned long addr,
5214 struct vm_area_struct *vma,
5215 unsigned int pages_per_huge_page)
5216{
5217 int i;
5218 struct page *dst_base = dst;
5219 struct page *src_base = src;
5220
5221 for (i = 0; i < pages_per_huge_page; ) {
5222 cond_resched();
5223 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5224
5225 i++;
5226 dst = mem_map_next(dst, dst_base, i);
5227 src = mem_map_next(src, src_base, i);
5228 }
5229}
5230
Huang Yingc9f4cd72018-08-17 15:45:49 -07005231struct copy_subpage_arg {
5232 struct page *dst;
5233 struct page *src;
5234 struct vm_area_struct *vma;
5235};
5236
5237static void copy_subpage(unsigned long addr, int idx, void *arg)
5238{
5239 struct copy_subpage_arg *copy_arg = arg;
5240
5241 copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5242 addr, copy_arg->vma);
5243}
5244
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005245void copy_user_huge_page(struct page *dst, struct page *src,
Huang Yingc9f4cd72018-08-17 15:45:49 -07005246 unsigned long addr_hint, struct vm_area_struct *vma,
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005247 unsigned int pages_per_huge_page)
5248{
Huang Yingc9f4cd72018-08-17 15:45:49 -07005249 unsigned long addr = addr_hint &
5250 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5251 struct copy_subpage_arg arg = {
5252 .dst = dst,
5253 .src = src,
5254 .vma = vma,
5255 };
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005256
5257 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5258 copy_user_gigantic_page(dst, src, addr, vma,
5259 pages_per_huge_page);
5260 return;
5261 }
5262
Huang Yingc9f4cd72018-08-17 15:45:49 -07005263 process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005264}
Mike Kravetzfa4d75c2017-02-22 15:42:49 -08005265
5266long copy_huge_page_from_user(struct page *dst_page,
5267 const void __user *usr_src,
Mike Kravetz810a56b2017-02-22 15:42:58 -08005268 unsigned int pages_per_huge_page,
5269 bool allow_pagefault)
Mike Kravetzfa4d75c2017-02-22 15:42:49 -08005270{
5271 void *src = (void *)usr_src;
5272 void *page_kaddr;
5273 unsigned long i, rc = 0;
5274 unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5275
5276 for (i = 0; i < pages_per_huge_page; i++) {
Mike Kravetz810a56b2017-02-22 15:42:58 -08005277 if (allow_pagefault)
5278 page_kaddr = kmap(dst_page + i);
5279 else
5280 page_kaddr = kmap_atomic(dst_page + i);
Mike Kravetzfa4d75c2017-02-22 15:42:49 -08005281 rc = copy_from_user(page_kaddr,
5282 (const void __user *)(src + i * PAGE_SIZE),
5283 PAGE_SIZE);
Mike Kravetz810a56b2017-02-22 15:42:58 -08005284 if (allow_pagefault)
5285 kunmap(dst_page + i);
5286 else
5287 kunmap_atomic(page_kaddr);
Mike Kravetzfa4d75c2017-02-22 15:42:49 -08005288
5289 ret_val -= (PAGE_SIZE - rc);
5290 if (rc)
5291 break;
5292
5293 cond_resched();
5294 }
5295 return ret_val;
5296}
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005297#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005298
Olof Johansson40b64ac2013-12-20 14:28:05 -08005299#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
Kirill A. Shutemovb35f1812014-01-21 15:49:07 -08005300
5301static struct kmem_cache *page_ptl_cachep;
5302
5303void __init ptlock_cache_init(void)
5304{
5305 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5306 SLAB_PANIC, NULL);
5307}
5308
Peter Zijlstra539edb52013-11-14 14:31:52 -08005309bool ptlock_alloc(struct page *page)
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005310{
5311 spinlock_t *ptl;
5312
Kirill A. Shutemovb35f1812014-01-21 15:49:07 -08005313 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005314 if (!ptl)
5315 return false;
Peter Zijlstra539edb52013-11-14 14:31:52 -08005316 page->ptl = ptl;
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005317 return true;
5318}
5319
Peter Zijlstra539edb52013-11-14 14:31:52 -08005320void ptlock_free(struct page *page)
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005321{
Kirill A. Shutemovb35f1812014-01-21 15:49:07 -08005322 kmem_cache_free(page_ptl_cachep, page->ptl);
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005323}
5324#endif
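
/*
 * Illustrative note, not part of memory.c: with split PTE locks and a
 * spinlock_t too large to embed in struct page (ALLOC_SPLIT_PTLOCKS), the
 * ptlock_init() helper in include/linux/mm.h uses the functions above
 * roughly like:
 *
 *	if (!ptlock_alloc(page))
 *		return false;
 *	spin_lock_init(ptlock_ptr(page));
 *
 * and ptlock_free(page) hands the spinlock back to the cache when the page
 * table page is released via pgtable_pte_page_dtor().
 */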