/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

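/*
 * Non-leaf RISC-V page-table entries carry the PFN of the next-level
 * table shifted into place plus _PAGE_TABLE, which marks the entry as
 * a pointer to that table rather than a leaf mapping.
 *
 * pmd_populate_kernel() hooks a kernel PTE table into a PMD entry.
 */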
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

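/*
 * User variant: the PTE table arrives as a struct page (pgtable_t),
 * so translate it to a virtual address before taking its PFN.
 */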
static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

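/*
 * pud_populate() only makes sense when the PMD level really exists;
 * with a folded PMD (two-level paging) there is no separate PMD table
 * to point the PUD at.
 */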
#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
#endif /* __PAGETABLE_PMD_FOLDED */

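/* Recover the struct page of the PTE table a PMD entry points at. */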
#define pmd_pgtable(pmd)	pmd_page(pmd)

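/*
 * A PGD is one page: the user entries are zeroed and the kernel half
 * is copied from init_mm, so every process shares the same kernel
 * mappings.
 */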
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		memcpy(pgd + USER_PTRS_PER_PGD,
			init_mm.pgd + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

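/* PMD tables exist as separate pages only when the level is not folded. */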
#ifndef __PAGETABLE_PMD_FOLDED

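/*
 * __GFP_ZERO hands back an already-cleared table; __GFP_RETRY_MAYFAIL
 * retries hard but lets the allocation fail instead of invoking the
 * OOM killer.
 */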
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)__get_free_page(
		GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

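/*
 * Unlike PTE pages, PMD tables here have no struct-page destructor to
 * run, so the page is handed straight back to the allocator.
 */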
#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

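/*
 * Tear down the PTE page's struct-page state (ptlock, accounting) and
 * then hand it to the mmu_gather batch for deferred freeing.
 */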
#define __pte_free_tlb(tlb, pte, buf)	\
do {					\
	pgtable_pte_page_dtor(pte);	\
	tlb_remove_page((tlb), pte);	\
} while (0)

#endif /* _ASM_RISCV_PGALLOC_H */