/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

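/*
 * MIPS upper-level page-table entries hold the kernel virtual address
 * of the next-level table, so populating a pmd is simply storing the
 * pointer to the pte table.
 */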
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
        pte_t *pte)
{
        set_pmd(pmd, __pmd((unsigned long)pte));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
        pgtable_t pte)
{
        set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
#define pmd_pgtable(pmd)        pmd_page(pmd)

/*
 * Initialize a new pmd table with invalid pointers.
 */
extern void pmd_init(unsigned long page, unsigned long pagetable);

#ifndef __PAGETABLE_PMD_FOLDED

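/*
 * With a three-level layout (pmd not folded) the pud entry likewise
 * stores the kernel virtual address of the pmd table.
 */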
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        set_pud(pud, __pud((unsigned long)pmd));
}
#endif

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        free_pages((unsigned long)pgd, PGD_ORDER);
}

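/*
 * Kernel pte tables come straight from the page allocator, already
 * zeroed; PTE_ORDER is the allocation order of one pte table.
 */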
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
        unsigned long address)
{
        return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER);
}

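/*
 * User pte tables are handed out as struct pages and may live in
 * highmem, hence clear_highpage(); pgtable_page_ctor() sets up the
 * split page-table lock and accounting and must be paired with
 * pgtable_page_dtor() before the page is freed.
 */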
static inline struct page *pte_alloc_one(struct mm_struct *mm,
        unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (!pte)
                return NULL;
        clear_highpage(pte);
        if (!pgtable_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }
        return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
        pgtable_page_dtor(pte);
        __free_pages(pte, PTE_ORDER);
}

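/*
 * When tearing down page tables through an mmu_gather, run the dtor
 * first and let tlb_remove_page() batch the actual freeing.
 */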
#define __pte_free_tlb(tlb, pte, address)       \
do {                                            \
        pgtable_page_dtor(pte);                 \
        tlb_remove_page((tlb), pte);            \
} while (0)

#ifndef __PAGETABLE_PMD_FOLDED

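/*
 * A fresh pmd table is filled with pointers to invalid_pte_table
 * rather than zeros, so walks of not-yet-mapped ranges always land on
 * a known invalid pte table.
 */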
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd;

        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
        if (pmd)
                pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
        return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        free_pages((unsigned long)pmd, PMD_ORDER);
}

#define __pmd_free_tlb(tlb, x, addr)    pmd_free((tlb)->mm, x)

#endif

#ifndef __PAGETABLE_PUD_FOLDED

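/*
 * Likewise, a fresh pud table is seeded with pointers to
 * invalid_pmd_table.
 */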
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pud_t *pud;

        pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
        if (pud)
                pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
        return pud;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        free_pages((unsigned long)pud, PUD_ORDER);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
        set_pgd(pgd, __pgd((unsigned long)pud));
}

#define __pud_free_tlb(tlb, x, addr)    pud_free((tlb)->mm, x)

#endif /* __PAGETABLE_PUD_FOLDED */

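/*
 * MIPS keeps no per-CPU cache of preconstructed page tables, so there
 * is nothing for check_pgt_cache() to trim.
 */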
#define check_pgt_cache()       do { } while (0)

extern void pagetable_init(void);

#endif /* _ASM_PGALLOC_H */