/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, each pte is initialized to 0. When memory is low,
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher layer code, so
 * that the failure is recognized later on. Linux does not seem to
 * handle these failures very well though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir. The
 * layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
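
/*
 * Worked example of the shift arithmetic above (illustrative only,
 * assuming the default CONFIG_PAGE_SIZE_4KB layout, i.e. PAGE_SHIFT == 12
 * and PTE_ORDER == PMD_ORDER == 0):
 *
 *   PMD_SHIFT   = 12 + (12 + 0 - 3) = 21  =>  PMD_SIZE   = 2MB
 *   PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30  =>  PGDIR_SIZE = 1GB
 */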

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space. Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space. We could add a third level but there
 * seems to be no need for it at the moment.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
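
/*
 * Sanity check of the figures above (illustrative arithmetic): with the
 * orders defined below, 64kB pages give PTRS_PER_PGD == PTRS_PER_PTE ==
 * 65536 / 8 == 2^13, so two levels map 13 + 13 + 16 == 42 bits; 16kB
 * pages give 2^11 entries per level, i.e. 11 + 11 + 14 == 36 bits.
 */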
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER		1
#else
#define PGD_ORDER		0
#endif
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER		0
#else
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg. Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
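
/*
 * Worked example (illustrative, assuming 4kB pages, where the tree maps
 * 1024 * 512 * 512 * 4096 bytes == 2^40, and assuming cpu_vmbits == 40):
 * VMALLOC_END == MAP_BASE + 2^40 - 2^32, i.e. the uppermost 4GB of the
 * mapped range is left out of the vmalloc arena.
 */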

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves, for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
#ifndef __PAGETABLE_PMD_FOLDED

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
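
/*
 * Illustrative sketch (not part of the original header): how the
 * accessors above compose into a software walk from a kernel virtual
 * address down to its pte. Assumes the usual 3-level (non-folded)
 * configuration; the function name is hypothetical.
 */
static inline pte_t *example_pte_lookup_kernel(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* top-level entry */
	pud_t *pud = pud_offset(pgd, address);	/* pud is folded into the pgd */
	pmd_t *pmd;

	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}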

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

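/*
 * Illustrative sketch of what such an init routine does (the real,
 * unrolled implementations live in arch/mips/mm/pgtable-64.c; this
 * simplified, hypothetical variant assumes the 3-level layout): each
 * entry of a fresh pgd is pointed at invalid_pmd_table so that an
 * access through an empty slot faults instead of chasing garbage.
 */
static inline void example_pgd_init(unsigned long page)
{
	unsigned long *entry = (unsigned long *)page;
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		entry[i] = (unsigned long)invalid_pmd_table;
}
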
/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
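
/*
 * Round-trip example of the encoding above (illustrative values): for
 * type == 0x5 and offset == 0x1234, __swp_entry() yields
 * (0x5 << 16) | (0x1234 << 24); __swp_type() then recovers 0x5 from
 * bits 16..23 and __swp_offset() recovers 0x1234 from bits 24..63.
 */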

#endif /* _ASM_PGTABLE_64_H */