/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#include <linux/pfn.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>

#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE. However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif
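
/*
 * Illustrative note (hedged, not part of this header): teardown paths
 * pass this value as the upper bound when freeing a task's page tables,
 * mirroring what exit_mmap() in mm/mmap.c does:
 *
 *	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 */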

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif
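
/*
 * Illustrative sketch; the helper below is hypothetical. Reclaim-style
 * code tests and clears the accessed bit through this interface rather
 * than poking the pte directly, so an architecture can substitute an
 * atomic or TLB-flushing variant.
 */
#if 0	/* example only */
static inline int example_pte_was_referenced(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep)
{
	/* Returns 1 if the page was accessed since the last check. */
	return ptep_test_and_clear_young(vma, addr, ptep);
}
#endif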

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#else
/*
 * Although this API is relevant only to THP, it is called from generic
 * rmap code under PageTransHuge(), hence it needs a dummy implementation
 * for !THP.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(pmdp);
	return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pud_t *pudp)
{
	pud_t pud = *pudp;

	pud_clear(pudp);
	return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp,
					    int full)
{
	return pmdp_huge_get_and_clear(mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pud_t *pudp,
					    int full)
{
	return pudp_huge_get_and_clear(mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or during address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif
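
/*
 * Illustrative sketch, assuming a hypothetical teardown loop: the "full"
 * argument tells the architecture that an entire address space is being
 * torn down, so it may use cheaper, non-synchronizing clears.
 */
#if 0	/* example only */
static inline void example_zap_one(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int full_teardown)
{
	if (pte_present(*ptep))
		ptep_get_and_clear_full(mm, addr, ptep, full_teardown);
	else
		pte_clear_not_present_full(mm, addr, ptep, full_teardown);
}
#endif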

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pud_t *pudp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
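
/*
 * Illustrative sketch of the classic copy-on-write preparation done at
 * fork time (the helper name is hypothetical): write-protecting the
 * parent's pte is what later turns a store into a copy-on-write fault.
 */
#if 0	/* example only */
static inline pte_t example_cow_protect(struct mm_struct *src_mm,
					unsigned long addr, pte_t *src_ptep)
{
	ptep_set_wrprotect(src_mm, addr, src_ptep);
	return *src_ptep;	/* now read-only; shared with the child */
}
#endif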

#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif

#ifndef pte_mk_savedwrite
#define pte_mk_savedwrite pte_mkwrite
#endif

#ifndef pte_clear_savedwrite
#define pte_clear_savedwrite pte_wrprotect
#endif

#ifndef pmd_savedwrite
#define pmd_savedwrite pmd_write
#endif

#ifndef pmd_mk_savedwrite
#define pmd_mk_savedwrite pmd_mkwrite
#endif

#ifndef pmd_clear_savedwrite
#define pmd_clear_savedwrite pmd_wrprotect
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	pud_t old_pud = *pudp;

	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	BUILD_BUG();
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable for
 * an architecture that doesn't have hardware dirty/accessed bits. In this
 * case we can't race with a CPU setting those bits, so a non-atomic
 * approach is fine.
 */
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}
#endif
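
/*
 * Sketch of how an architecture without hardware dirty/accessed bits can
 * adopt the generic helper from its own pgtable.h (a few ports do exactly
 * this; shown here only as an example):
 *
 *	#define pmdp_establish generic_pmdp_establish
 */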

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
	(pte_present(pte) && (!(write) || pte_write(pte)))
#endif

#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif

#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
	(pud_present(pud) && (!(write) || pud_write(pud)))
#endif

#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif

#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif
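
/*
 * Illustrative sketch, assuming a simplified lockless-walk check (the
 * helper is hypothetical): callers pass "write" to ask whether the entry
 * permits the intended access before using the mapping.
 */
#if 0	/* example only */
static inline bool example_may_follow(pte_t pte, bool write,
				      unsigned long *pfnp)
{
	if (!pte_access_permitted(pte, write))
		return false;
	*pfnp = pte_pfn(pte);	/* entry is present, and writable if needed */
	return true;
}
#endif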

#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}

static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	return pud_val(pud_a) == pud_val(pud_b);
}
#endif

#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
	return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif

#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
	return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_pte_safe(ptep, pte) \
({ \
	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
	set_pte(ptep, pte); \
})

#define set_pmd_safe(pmdp, pmd) \
({ \
	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
	set_pmd(pmdp, pmd); \
})

#define set_pud_safe(pudp, pud) \
({ \
	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
	set_pud(pudp, pud); \
})

#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
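
/*
 * Illustrative sketch, assuming an early-boot style routine that only
 * ever populates previously non-present slots (so no TLB flush can be
 * needed); this is the situation the *_safe() variants are meant for.
 * All names below other than set_pte_safe()/pfn_pte() are hypothetical.
 */
#if 0	/* example only */
static void example_map_one(pte_t *ptep, unsigned long pfn, pgprot_t prot)
{
	/* WARNs, rather than silently skipping a flush, if the slot was live. */
	set_pte_safe(ptep, pfn_pte(pfn, prot));
}
#endif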

#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_do_swap_page() can restore this
 * metadata when a page is swapped back in.
 */
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{

}
#endif

#ifndef __HAVE_ARCH_UNMAP_ONE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_unmap_one() can save this
 * metadata on a swap-out of a page.
 */
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr,
				 pte_t orig_pte)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
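
/*
 * Illustrative example (hedged): a protection update combines the old
 * and requested protections so special caching attributes survive the
 * change, mirroring what vma_set_page_prot() does:
 *
 *	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
 *					  vm_get_page_prot(vm_flags));
 */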

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef p4d_addr_end
#define p4d_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void p4d_clear_bad(p4d_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
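
/*
 * Illustrative sketch of the canonical walk pattern these helpers exist
 * for (compare the walkers in mm/memory.c; the function below is
 * hypothetical): p?d_addr_end() clips each step to the range, and
 * p?d_none_or_clear_bad() skips empty or corrupt entries.
 */
#if 0	/* example only */
static void example_walk_pmds(struct mm_struct *mm, pud_t *pud,
			      unsigned long addr, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;	/* nothing mapped at this pmd */
		/* ... visit the pte level for [addr, next) ... */
	} while (pmd++, addr = next, addr != end);
}
#endif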

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time. The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
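
/*
 * Illustrative sketch of the transaction pattern described above; this
 * mirrors the change_pte_range() path in mm/mprotect.c, but the function
 * below is hypothetical. The pte lock must already be held.
 */
#if 0	/* example only */
static void example_change_prot(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pgprot_t newprot)
{
	pte_t ptent;

	ptent = ptep_modify_prot_start(mm, addr, ptep);	/* pte made non-present */
	ptent = pte_modify(ptent, newprot);		/* recompute protection bits */
	ptep_modify_prot_commit(mm, addr, ptep, ptent);	/* reinstall (maybe batched) */
}
#endif
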
#endif /* CONFIG_MMU */

/*
 * No-op macros that just return the current protection value. Defined here
 * because these macros can be used even if CONFIG_MMU is not defined.
 */
#ifndef pgprot_encrypted
#define pgprot_encrypted(prot)	(prot)
#endif

#ifndef pgprot_decrypted
#define pgprot_decrypted(prot)	(prot)
#endif

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
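
/*
 * Illustrative sketch of the lazy-MMU bracket (the loop is hypothetical;
 * real users include the fork pte-copy and mprotect paths). Updates made
 * between enter and leave may be batched by a hypervisor, so nothing may
 * read the ptes through raw pointers inside the window.
 */
#if 0	/* example only */
static void example_batch_update(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t *newvals, int n)
{
	int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < n; i++, addr += PAGE_SIZE)
		set_pte_at(mm, addr, ptep + i, newvals[i]);
	arch_leave_lazy_mmu_mode();	/* flushes any batched updates */
}
#endif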

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests. By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exit should always be
 * paired. This is for sanity of maintaining and reasoning about the
 * kernel code. In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range() and
 * vmf_insert_pfn().
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vmf_insert_pfn().
 */
static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				    pfn_t pfn)
{
}

/*
 * track_pfn_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn and size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
 */
static inline void untrack_pfn_moved(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			     pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
extern void untrack_pfn_moved(struct vm_area_struct *vma);
#endif
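
/*
 * Illustrative lifecycle of the tracking hooks above (hedged summary of
 * how the remap/unmap paths pair them up):
 *
 *	track_pfn_remap()   - before a new remap_pfn_range() mapping
 *	track_pfn_insert()  - before a new single-pfn vmf_insert_pfn()
 *	track_pfn_copy()    - when fork() duplicates a pfnmap vma
 *	untrack_pfn()       - when the mapping, or part of it, goes away
 *	untrack_pfn_moved() - after mremap() moves a pfnmap
 */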

#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif /* pud_write */

#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
	(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
static inline int pud_trans_huge(pud_t pud)
{
	return 0;
}
#endif

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	/*
	 * Depend on compiler for an atomic pmd read. NOTE: this is
	 * only going to work if the pmdval_t isn't larger than
	 * an unsigned long.
	 */
	return *pmdp;
}
#endif

#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif
/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a hugepmd or into a regular pmd (if the hugepage allocation
 * fails). While holding the mmap_sem in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd. When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined, so behaving as if the pmd was none is safe (because it
 * can return none anyway). The compiler level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 *
 * For 32bit kernels with a 64bit large pmd_t this automatically takes
 * care of reading the pmd atomically to avoid SMP race conditions
 * against pmd_populate() when the mmap_sem is held for reading by the
 * caller (a special atomic read not done by "gcc" as in the generic
 * version above is also needed when THP is disabled, because the page
 * fault can populate the pmd from under us).
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
	 * pmd_read_atomic is allowed to return a non-atomic pmdval
	 * (for example pointing to a hugepage that has never been
	 * mapped in the pmd). The below checks will only care about
	 * the low part of the pmd with 32bit PAE x86 anyway, with the
	 * exception of pmd_none(). So the important thing is that if
	 * the low part of the pmd is found null, the high part will
	 * be also null or the pmd_none() check below would be
	 * confused.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	/*
	 * !pmd_present() checks for pmd migration entries
	 *
	 * The complete check uses is_pmd_migration_entry() in linux/swapops.h.
	 * But using that requires moving the current function and
	 * pmd_trans_unstable() to linux/swapops.h to resolve the dependency,
	 * which is too much code motion.
	 *
	 * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
	 * because !pmd_present() pages can only be under migration, not
	 * swapped out.
	 *
	 * pmd_none() is preserved for future condition checks on pmd
	 * migration entries and to avoid confusion with this function's
	 * name, although it is redundant with !pmd_present().
	 */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
		(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel. Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that already verified the pmd is not none and they want to
 * walk ptes while holding the mmap_sem in read mode (write mode doesn't
 * need this). If THP is not enabled, the pmd can't go away under the
 * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 * run a pmd_trans_unstable before walking the ptes after
 * split_huge_page_pmd returns (because it may have run when the pmd
 * became null, but then a page fault can map in a THP and not a
 * regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}
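
/*
 * Illustrative sketch (hypothetical function) of the pattern the comment
 * above describes: re-validate the pmd with pmd_trans_unstable() before
 * dropping to the pte level, and treat an unstable pmd as "nothing to do".
 */
#if 0	/* example only */
static int example_walk_ptes(struct vm_area_struct *vma, pmd_t *pmd,
			     unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_unstable(pmd))
		return 0;	/* raced with THP fault or MADV_DONTNEED; skip */

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	/* ... inspect ptes for [addr, end) ... */
	pte_unmap_unlock(pte, ptl);
	return 0;
}
#endif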

#ifndef CONFIG_NUMA_BALANCING
/*
 * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
 * the only case the kernel cares about is NUMA balancing, and the bit is
 * only ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs
 * are not marked _PAGE_PROTNONE so, by default, implement the helper as
 * "always no". It is the responsibility of the caller to distinguish
 * between PROT_NONE protections and NUMA hinting fault protections.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
int p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif /* !__PAGETABLE_P4D_FOLDED */

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return 0;
}
static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * ARCHes with special requirements for evicting THP backing TLB entries can
 * implement this. Otherwise also, it can help optimize normal TLB flush in
 * THP regime. Stock flush_tlb_range() typically has an optimization to nuke
 * the entire TLB if the flush span is greater than a threshold, which will
 * likely be true for a single huge page. Thus a single THP flush will
 * invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
#endif
#endif

struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
			unsigned long size, pgprot_t *vma_prot);

#ifndef CONFIG_X86_ESPFIX64
static inline void init_espfix_bsp(void) { }
#endif

#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	return true;
}

static inline bool arch_has_pfn_modify_check(void)
{
	return false;
}
#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */

/*
 * Architecture PAGE_KERNEL_* fallbacks
 *
 * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
 * because they really don't support them, or the port needs to be updated to
 * reflect the required functionality. Below are a set of relatively safe
 * fallbacks, as best effort, which we can count on in lieu of the architectures
 * not defining them on their own yet.
 */

#ifndef PAGE_KERNEL_RO
# define PAGE_KERNEL_RO PAGE_KERNEL
#endif

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif

#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#else
#define has_transparent_hugepage() 0
#endif
#endif

/*
 * On some architectures it depends on the mm if the p4d/pud or pmd
 * layer of the page table hierarchy is folded or not.
 */
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
#endif

#ifndef mm_pud_folded
#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
#endif

#ifndef mm_pmd_folded
#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
#endif

#endif /* _ASM_GENERIC_PGTABLE_H */