/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR PAGE_OFFSET
#else

#define ADDRESS_SPACE_END (UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR PAGE_OFFSET
#endif

#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE (SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (MODULES_END)
#else
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
#endif
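/*
 * Worked example, for illustration only (assuming the usual 64-bit
 * defaults, i.e. ADDRESS_SPACE_END == 2^64 - 1 and SZ_2G == 0x80000000):
 *
 *   KERNEL_LINK_ADDR = 2^64 - 2^31 = 0xffffffff80000000
 *
 * Modules then occupy [PFN_ALIGN(&_end) - 2G, PFN_ALIGN(&_start)), and the
 * BPF JIT region is the 128M immediately below MODULES_END.
 */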

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END (VMALLOC_START - 1)
#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
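/*
 * Worked example (not normative): with Sv39 defaults, CONFIG_VA_BITS == 39,
 * PAGE_SHIFT == 12 and a 64-byte struct page (STRUCT_PAGE_MAX_SHIFT == 6),
 * VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32, so VMEMMAP_SIZE is 4GiB and the
 * vmemmap array occupies the 4GiB directly below VMALLOC_START.
 */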

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap ((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE PMD_SIZE
#else
#define FIXADDR_SIZE PGDIR_SIZE
#endif
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
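
/*
 * Sketch of the resulting MMU virtual layout (low to high), derived from
 * the definitions above rather than from any authoritative source:
 *
 *   FIXADDR_START .. FIXADDR_TOP     fixmap
 *   PCI_IO_START  .. PCI_IO_END      PCI I/O space (16M)
 *   VMEMMAP_START .. VMEMMAP_END     vmemmap
 *   VMALLOC_START .. VMALLOC_END     vmalloc/ioremap space
 *   PAGE_OFFSET   ..                 linear mapping of physical memory
 *   KERNEL_LINK_ADDR ..              kernel image (64-bit only, at the top)
 */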

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET SZ_32M
#define XIP_OFFSET_MASK (SZ_32M - 1)
#else
#define XIP_OFFSET 0
#endif

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= CONFIG_XIP_PHYS_ADDR &&					\
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?			\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET : \
		__a;							\
	})
#else
#define XIP_FIXUP(addr) (addr)
#endif /* CONFIG_XIP_KERNEL */
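/*
 * Worked example with made-up numbers (CONFIG_XIP_PHYS_ADDR and
 * CONFIG_PHYS_RAM_BASE are configuration-dependent): with XIP_PHYS_ADDR at
 * 0x21000000 and PHYS_RAM_BASE at 0x80000000, an address in the second 32M
 * of the XIP window, say 0x23000000, is redirected to its RAM copy at
 * 0x23000000 - 0x21000000 + 0x80000000 - XIP_OFFSET == 0x80000000.
 * Addresses outside the 64M XIP window are returned unchanged.
 */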

#ifdef CONFIG_MMU
/* Number of entries in the page global directory */
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ |	\
				 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY PAGE_READ
#define PAGE_COPY_EXEC PAGE_EXEC
#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
#define PAGE_SHARED PAGE_WRITE
#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC

#define _PAGE_KERNEL (_PAGE_READ	\
		| _PAGE_WRITE		\
		| _PAGE_PRESENT		\
		| _PAGE_ACCESSED	\
		| _PAGE_DIRTY		\
		| _PAGE_GLOBAL)

#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
				       | _PAGE_EXEC)

#define PAGE_TABLE __pgprot(_PAGE_TABLE)

/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions.
 */
#define _PAGE_IOREMAP _PAGE_KERNEL

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000 PAGE_NONE
#define __P001 PAGE_READ
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_EXEC
#define __P101 PAGE_READ_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000 PAGE_NONE
#define __S001 PAGE_READ
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_EXEC
#define __S101 PAGE_READ_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
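/*
 * For illustration (standard protection_map indexing, xwr bit order): a
 * MAP_PRIVATE mapping requesting PROT_READ | PROT_WRITE uses __P011, i.e.
 * PAGE_COPY, so it starts out read-only and becomes writable only through
 * copy-on-write; the same request with MAP_SHARED uses __S011 == PAGE_SHARED.
 */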

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too, because when splitting a
	 * THP, split_huge_page() temporarily clears the present bit; in that
	 * situation pmd_present() and pmd_trans_huge() still need to return
	 * true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x) pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
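/*
 * Example (illustrative only): mk_pte(page, PAGE_KERNEL) yields a PTE whose
 * PFN field is page_to_pfn(page) << _PAGE_PFN_SHIFT and whose low bits are
 * the _PAGE_KERNEL protection bits; pte_pfn()/pte_page() invert the mapping.
 */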

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))


/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0: _PAGE_PRESENT (zero)
 *	bit            1: _PAGE_PROT_NONE (zero)
 *	bits      2 to 6: swap type
 *	bits 7 to XLEN-1: swap offset
 */
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK() \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
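/*
 * Worked example (illustrative only): __swp_entry(3, 0x10) packs to
 * (3 << 2) | (0x10 << 7) == 0x80c; __swp_type() and __swp_offset() then
 * recover 3 and 0x10, and bits 0-1 stay clear so the entry is seen as
 * neither present nor PROT_NONE.
 */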

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START (-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START FIXADDR_START
#endif

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE FIXADDR_START
#endif
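/*
 * Sanity check of the RV64 number above, assuming Sv39 (PGDIR_SIZE == 1GiB,
 * PTRS_PER_PGD == 512): TASK_SIZE = 1GiB * 512 / 2 = 256GiB = 0x4000000000,
 * which leaves the upper half of the PGD to the kernel.
 */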

#else /* CONFIG_MMU */

#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
#define TASK_SIZE 0xffffffffUL
#define VMALLOC_START 0
#define VMALLOC_END TASK_SIZE

#endif /* !CONFIG_MMU */

#define kern_addr_valid(addr) (1) /* FIXME */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va _dtb_early_va
#define dtb_early_pa _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */