/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern struct page *vmemmap;

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
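/*
 * Worked example (illustrative; assumes the 52-bit layout where
 * PTE_ADDR_HIGH covers pte bits [15:12], which hold PA bits [51:48]):
 * for phys == 0x000f000080000000, __phys_to_pte_val() folds bits [51:48]
 * down by 36 into pte bits [15:12], giving 0x000000008000f000, and
 * __pte_to_phys() shifts them back up and ORs in the low bits,
 * recovering the original address.
 */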

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
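/*
 * Worked example (illustrative; assumes a 4K granule, where CONT_PTE_SIZE
 * is 16 * PAGE_SIZE == SZ_64K): pte_cont_addr_end(0x11000, 0x100000)
 * computes __boundary == (0x11000 + 0x10000) & ~0xffffUL == 0x20000 and
 * returns it, clamping a range walk to the next contiguous-entry boundary.
 * The "- 1" in the comparison keeps the result correct when 'end' wraps
 * to 0 at the top of the address space.
 */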

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        |   1           0          0
 *   0      1        |   1           1          0
 *   1      0        |   1           0          1
 *   1      1        |   0           1          x
 *
237 * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
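/*
 * Example of the table above (illustrative): a clean, writable pte starts
 * out as (PTE_WRITE | PTE_RDONLY). On the first store, hardware DBM clears
 * PTE_RDONLY instead of faulting, so the entry moves to the "Dirty
 * Writable" row (PTE_WRITE set, PTE_RDONLY clear) and pte_hw_dirty()
 * becomes true with no software bookkeeping.
 */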

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, 0, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
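/*
 * Worked example (illustrative): __swp_entry(3, 0x1234) evaluates to
 * (3 << 2) | (0x1234 << 8) == 0x12340c, placing the type in bits [7:2]
 * and the offset in bits [57:8] while leaving bits [1:0] clear, so the
 * entry can never be mistaken for a valid/present pte.
 */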

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
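/*
 * Illustrative note (an assumption based on the ARMv8.2-LPA TTBR layout,
 * not spelled out in this file): the base address field of TTBRx_EL1
 * cannot hold PA bits [51:48] in place, so "(addr) >> 46" folds them down
 * into TTBR bits [5:2], where the 52-bit BADDR encoding expects them;
 * TTBR_BADDR_MASK_52 then masks off everything outside the field.
 */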

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a
 * zeroed page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte arch_faults_on_old_pte

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */