// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

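/*
 * Carries the pte bits to set and to clear through apply_to_page_range()
 * into the change_page_range() callback below.
 */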
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

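/*
 * Defaults to CONFIG_RODATA_FULL_DEFAULT_ENABLED. When set, read-only
 * permission changes made to a vmalloc'ed range are also applied to the
 * linear map alias of its backing pages (see change_memory_common()), and
 * the set_direct_map_*() helpers below become operational.
 */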
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

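/*
 * The set_memory_*() wrappers below operate on whole pages and go through
 * change_memory_common(), so they only accept ranges that lie entirely
 * within a single vmalloc/vmap (VM_ALLOC) area, such as module mappings.
 */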
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

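/*
 * Illustrative sketch only (not part of this file's API): a caller that
 * stages data in a vmalloc'ed buffer can drop write permission once the
 * contents are final and restore it before freeing. The function name,
 * "payload" and "payload_len" are hypothetical.
 *
 *	static int seal_buffer_example(const void *payload, size_t payload_len)
 *	{
 *		void *buf;
 *
 *		if (payload_len > PAGE_SIZE)
 *			return -EINVAL;
 *		buf = vmalloc(PAGE_SIZE);
 *		if (!buf)
 *			return -ENOMEM;
 *		memcpy(buf, payload, payload_len);
 *		set_memory_ro((unsigned long)buf, 1);	// seal: read-only from now on
 *
 *		// ... consume the sealed buffer ...
 *
 *		set_memory_rw((unsigned long)buf, 1);	// unseal before freeing
 *		vfree(buf);
 *		return 0;
 *	}
 */
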
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

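/*
 * set_direct_map_{invalid,default}_noflush() operate on the linear map
 * (direct map) alias of a single page. As the _noflush suffix indicates,
 * no TLB maintenance is done here; the caller is expected to flush the
 * affected range itself. When the linear map may contain block mappings
 * (!rodata_full), both are no-ops so that no splitting is ever needed.
 */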
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

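/*
 * Map/unmap (set/clear PTE_VALID on) the linear map entries of @numpages
 * pages starting at @page. This is a no-op unless debug_pagealloc or
 * rodata_full is in effect, i.e. unless the linear map is known to be
 * mapped at page granularity.
 */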
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled() && !rodata_full)
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!debug_pagealloc_enabled() && !rodata_full)
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}