// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

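/*
 * Callback for apply_to_page_range(): update a single kernel PTE by first
 * clearing and then setting the protection bits requested by the caller.
 */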
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

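/*
 * Change the protection bits of [addr, addr + numpages * PAGE_SIZE). The
 * range must be covered by exactly one vmalloc/vmap area; when rodata_full
 * is set, read-only changes are also applied to the linear alias of the
 * pages backing that area.
 */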
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

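/*
 * set_memory_*() helpers: toggle write protection (PTE_RDONLY/PTE_WRITE) or
 * execute-never (PTE_PXN) on a range of kernel page mappings.
 *
 * Illustrative sketch (hypothetical caller, not taken from this file): a
 * vmalloc'ed buffer 'buf' spanning 'nr_pages' pages could be made read-only
 * and executable with:
 *
 *	set_memory_ro((unsigned long)buf, nr_pages);
 *	set_memory_x((unsigned long)buf, nr_pages);
 */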
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

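/*
 * Set or clear PTE_VALID on a range of page mappings, making the range
 * inaccessible (enable == 0) or accessible again (enable != 0). The caller
 * must ensure the range is mapped at page granularity.
 */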
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

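/*
 * Mark the given page's linear map entry invalid without flushing the TLB.
 * This is a no-op unless rodata_full is set, since only then is the linear
 * map known to be mapped at page granularity and safe to change here.
 */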
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

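/*
 * Restore the default linear map permissions (valid and writable) for the
 * given page, again without a TLB flush and only when rodata_full is set.
 */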
int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

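/*
 * Map (enable != 0) or unmap (enable == 0) a range of pages in the linear
 * map by toggling PTE_VALID. Does nothing unless debug_pagealloc or
 * rodata_full is in effect, i.e. unless the linear map is known to use
 * page mappings for these pages.
 */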
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled() && !rodata_full)
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!debug_pagealloc_enabled() && !rodata_full)
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
James Morse5ebe3a42016-08-24 18:27:30 +0100232}