// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains pgtable related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * Page table geometry, filled in at boot time depending on whether the
 * hash or radix MMU is in use.
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
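/*
 * Return the struct page backing a P4D entry: the mapped huge page for a
 * leaf entry, otherwise the page holding the PUD table it points to.
 */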
struct page *p4d_page(p4d_t p4d)
{
	if (p4d_is_leaf(p4d)) {
		VM_WARN_ON(!p4d_huge(p4d));
		return pte_page(p4d_pte(p4d));
	}
	return virt_to_page(p4d_pgtable(p4d));
}
#endif
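
/*
 * Return the struct page backing a PUD entry: the mapped huge page for a
 * leaf entry, otherwise the page holding the PMD table it points to.
 */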
struct page *pud_page(pud_t pud)
{
	if (pud_is_leaf(pud)) {
		VM_WARN_ON(!pud_huge(pud));
		return pte_page(pud_pte(pud));
	}
	return virt_to_page(pud_pgtable(pud));
}

/*
 * For a hugepage we have the pfn in the pmd, with the low PTE_RPN_SHIFT
 * bits used for flags.
 * For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_is_leaf(pmd)) {
		VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
		return pte_page(pmd_pte(pmd));
	}
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
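/*
 * Write-protect the kernel's read-only data; the actual work depends on
 * whether the radix or hash MMU is in use.
 */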
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
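
/*
 * Strip execute permission from the kernel's init text once it is no longer
 * needed; dispatches to the radix or hash implementation.
 */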
void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif