/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(VMEMMAP_START - SZ_256M)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
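
/*
 * Worked example (illustrative only): with 52-bit PAs (64K pages), bits
 * 51:48 of the physical address are stored in pte bits 15:12, below the
 * page-aligned address bits. __phys_to_pte_val() ORs in (phys >> 36),
 * which moves bits 51:48 down to bits 15:12 before masking with
 * PTE_ADDR_MASK; __pte_to_phys() shifts the PTE_ADDR_HIGH field back up
 * by 36 and combines it with the untouched low address bits, recovering
 * the original (page-aligned) physical address.
 */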

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
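
/*
 * Sketch of the intended use (illustrative, assumed values): callers walk
 * a range in contiguous-entry steps, clamping the final step at 'end'.
 * With 4K pages (CONT_PTE_SIZE == 64K):
 *
 *	pte_cont_addr_end(0x11000, 0x31000) == 0x20000 (next 64K boundary)
 *	pte_cont_addr_end(0x20000, 0x31000) == 0x30000
 *	pte_cont_addr_end(0x30000, 0x31000) == 0x31000 (clamped to 'end')
 *
 * The '- 1' comparison keeps the macro correct when the boundary wraps
 * past the top of the address space.
 */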

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). Execute-only
 * mappings, e.g. PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
 * clear), must return false. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |     1          0          0
 *   0      1      |     1          1          0
 *   1      0      |     1          0          1
 *   1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
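
/*
 * Example reading of the table above (illustrative): a clean, writable
 * pte starts in state (PTE_RDONLY=1, PTE_WRITE=1, PTE_DIRTY=0). On the
 * first write, hardware DBM clears PTE_RDONLY instead of faulting,
 * moving the entry to the Dirty/Writable row (0, 1, x), which is what
 * pte_hw_dirty() detects as PTE_WRITE && !PTE_RDONLY. Without DBM the
 * same transition is performed by the page fault handler, which sets
 * the software PTE_DIRTY bit instead.
 */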

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted() returns false for exec only mappings, they
	 * don't expose tags (instruction fetches don't check tags).
	 */
	if (system_supports_mte() && pte_access_permitted(pte, false) &&
	    !pte_special(pte)) {
		pte_t old_pte = READ_ONCE(*ptep);
		/*
		 * We only need to synchronise if the new PTE has tags enabled
		 * or if swapping in (in which case another mapping may have
		 * set tags in the past even if this PTE isn't tagged).
		 * (!pte_none() && !pte_present()) is an open coded version of
		 * is_swap_pte()
		 */
		if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
			mte_sync_tags(old_pte, pte);
	}

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}
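
/*
 * Usage sketch (illustrative, assumed caller): generic mm code installs an
 * entry roughly as follows; set_pte_at() then takes care of I-cache/D-cache
 * synchronisation for executable user mappings, MTE tag synchronisation
 * and the DEBUG_VM racy-update checks before the final set_pte():
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	if (vma->vm_flags & VM_WRITE)
 *		entry = pte_mkwrite(pte_mkdirty(entry));
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 */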

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
#define set_pud_at(mm, addr, pudp, pud)	set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}
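
/*
 * Illustrative note: swapper_pg_dir fits in a single page, so comparing
 * page-aligned addresses is sufficient to tell whether a pgd/p4d/pud/pmd
 * pointer lands inside the swapper page tables. Entries there may be
 * mapped read-only and must be updated via set_swapper_pgd() rather than
 * written directly.
 */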

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
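
/*
 * Sketch of the fixmap walk pattern (illustrative): early page-table code
 * cannot assume the tables are reachable via the linear map, so a table is
 * temporarily mapped through the fixmap, updated, then unmapped again:
 *
 *	pte_t *ptep = pte_set_fixmap_offset(pmdp, addr);
 *	set_pte(ptep, pfn_pte(pfn, prot));
 *	pte_clear_fixmap();
 */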

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}
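
/*
 * Illustrative note: the cmpxchg_relaxed() loop above retries until the
 * value it last observed is still the one in memory, so a racing hardware
 * DBM update (e.g. PTE_RDONLY being cleared by a concurrent write) is
 * never lost: each retry re-applies pte_mkold() to the freshly observed
 * entry, changing nothing but the PTE_AF bit.
 */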

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}
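
/*
 * Worked example (illustrative): wrprotecting a hardware-dirty entry
 * (PTE_RDONLY=0, PTE_WRITE=1) goes through pte_wrprotect(), which first
 * sets PTE_DIRTY via pte_mkdirty() and then sets PTE_RDONLY and clears
 * PTE_WRITE. The result (PTE_RDONLY=1, PTE_WRITE=0, PTE_DIRTY=1) matches
 * the Dirty=1/Writable=0 row of the DBM table earlier in this file, so
 * pte_dirty() still reports the page as dirty.
 */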

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
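
/*
 * Worked example (illustrative, assumed values): __swp_entry(3, 0x1234)
 * yields (3 << 2) | (0x1234 << 8) == 0x12340c. Bits 0-1 remain zero, so
 * the resulting pte is neither pte_valid() nor pte_present(), and bit 58
 * (PTE_PROT_NONE) stays clear so the entry cannot be confused with a
 * PROT_NONE mapping.
 */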

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	if (system_supports_mte() && mte_restore_tags(entry, page))
		set_bit(PG_mte_tagged, &page->flags);
}

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
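
/*
 * Illustrative note: as with the 52-bit pte layout above, a TTBR cannot
 * hold bits 51:48 of the base address in place; (addr >> 46) moves bits
 * 51:48 down to bits 5:2, where the 52-bit TTBR format expects them, and
 * TTBR_BADDR_MASK_52 drops the bits that must not be set.
 */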

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a
 * zeroed page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte		arch_faults_on_old_pte

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
static inline bool arch_wants_old_prefaulted_pte(void)
{
	return !arch_faults_on_old_pte();
}
#define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	if (cpus_have_const_cap(ARM64_HAS_EPAN))
		return prot;

	if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
		return prot;

	return PAGE_READONLY_EXEC;
}

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */