/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

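/*
 * The populate helpers below link a freshly allocated lower-level table
 * into its parent entry: the table's PFN is shifted into place and
 * marked as a non-leaf entry via _PAGE_TABLE. pmd_populate_kernel()
 * takes a kernel virtual pointer, while pmd_populate() takes a
 * pgtable_t (struct page *) and therefore goes through page_address().
 */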
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

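/*
 * pgtable_l4_enabled selects four-level paging (Sv48) at runtime. Under
 * Sv39 the pud level is folded, so the p4d/pud helpers below do nothing
 * and no separate pud table is ever allocated or linked.
 */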
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

#define pud_alloc_one pud_alloc_one
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l4_enabled)
		return __pud_alloc_one(mm, addr);

	return NULL;
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud)
#endif /* __PAGETABLE_PMD_FOLDED */

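/*
 * A new pgd starts with its user entries cleared and its kernel half
 * copied from init_mm, so every process shares the same kernel page
 * tables.
 */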
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		memcpy(pgd + USER_PTRS_PER_PGD,
			init_mm.pgd + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED

#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

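/*
 * pte pages are freed through the mmu_gather: run the page-table
 * destructor first, then hand the page to the gather so it is only
 * released after the TLB has been flushed.
 */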
#define __pte_free_tlb(tlb, pte, buf) \
do { \
	pgtable_pte_page_dtor(pte); \
	tlb_remove_page((tlb), pte); \
} while (0)
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */