// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>
#include <asm/set_memory.h>

#include "mm_internal.h"

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	unsigned long	curpage;
	unsigned long	pfn;
	unsigned int	flags;
	unsigned int	force_split		: 1,
			force_static_prot	: 1;
	struct page	**pages;
};

enum cpa_warn {
	CPA_CONFLICT,
	CPA_PROTECT,
	CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock, so that we don't allow any other CPU with stale large TLB
 * entries to change the page attributes in parallel while some other CPU is
 * splitting a large page entry and changing the attributes.
 */
static DEFINE_SPINLOCK(cpa_lock);

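/*
 * cpa_data::flags bits: CPA_FLUSHTLB is set once a mapping was actually
 * changed, CPA_ARRAY/CPA_PAGES_ARRAY select how __cpa_addr() resolves the
 * request addresses, and CPA_NO_CHECK_ALIAS suppresses the alias scan.
 */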
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
	cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
	cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
	cpa_4k_install++;
}

static inline void cpa_inc_lp_sameprot(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_sameprot++;
	else
		cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_preserved++;
	else
		cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
	seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
	seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
	seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
	seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
	seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
	seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
	return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
	.open		= cpastats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init cpa_stats_init(void)
{
	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
			    &cpastats_fops);
	return 0;
}
late_initcall(cpa_stats_init);
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif


static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}

#endif

/*
 * See set_mce_nospec().
 *
 * Machine check recovery code needs to change cache mode of poisoned pages to
 * UC to avoid speculative access logging another error. But passing the
 * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
 * speculative access. So we cheat and flip the top bit of the address. This
 * works fine for the code that updates the page tables. But at the end of the
 * process we need to flush the TLB and cache and the non-canonical address
 * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
 *
 * But in the common case we already have a canonical address. This code
 * will fix the top bit if needed and is a no-op otherwise.
 */
static inline unsigned long fix_addr(unsigned long addr)
{
#ifdef CONFIG_X86_64
	return (long)(addr << 1) >> 1;
#else
	return addr;
#endif
}

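/*
 * Resolve the target virtual address for request index @idx, depending on
 * whether the CPA request operates on a page array, an address array or a
 * contiguous range starting at *cpa->vaddr.
 */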
static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[idx];

		if (unlikely(PageHighMem(page)))
			return 0;

		return (unsigned long)page_address(page);
	}

	if (cpa->flags & CPA_ARRAY)
		return cpa->vaddr[idx];

	return *cpa->vaddr + idx * PAGE_SIZE;
}

/*
 * Flushing functions
 */

static void clflush_cache_range_opt(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	for (; p < vend; p += clflush_size)
		clflushopt(p);
}

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
 * SFENCE to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	mb();
	clflush_cache_range_opt(vaddr, size);
	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
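/*
 * Illustrative use only: after the CPU writes a buffer that a non-coherent
 * device will read, the dirty cache lines must be pushed out, e.g.:
 *
 *	memcpy(shared_buf, data, len);
 *	clflush_cache_range(shared_buf, len);
 *
 * (shared_buf is a hypothetical driver-owned mapping.)
 */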

void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

void __cpa_flush_tlb(void *data)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	for (i = 0; i < cpa->numpages; i++)
		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}

static void cpa_flush(struct cpa_data *data, int cache)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return;
	}

	if (cpa->numpages <= tlb_single_page_flush_ceiling)
		on_each_cpu(__cpa_flush_tlb, cpa, 1);
	else
		flush_tlb_all();

	if (!cache)
		return;

	mb();
	for (i = 0; i < cpa->numpages; i++) {
		unsigned long addr = __cpa_addr(cpa, i);
		unsigned int level;

		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
	}
	mb();
}

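/*
 * Check whether the inclusive ranges [r1_start, r1_end] and
 * [r2_start, r2_end] intersect.
 */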
static bool overlaps(unsigned long r1_start, unsigned long r1_end,
		     unsigned long r2_start, unsigned long r2_end)
{
	return (r1_start <= r2_end && r1_end >= r2_start) ||
		(r2_start <= r1_end && r2_end >= r1_start);
}

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
		return _PAGE_NX;
	return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases. This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

	/*
	 * Note: __end_rodata is page aligned and not inclusive, so
	 * subtract 1 to get the last enforced PFN in the rodata area.
	 */
	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
		return _PAGE_RW;
	return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
 * out of which the kernel actually executes. Do not protect the low
 * mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
	unsigned long t_end = (unsigned long)_etext - 1;
	unsigned long t_start = (unsigned long)_text;

	if (overlaps(start, end, t_start, t_end))
		return _PAGE_NX;
	return 0;
}

#if defined(CONFIG_X86_64)
/*
 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
 * the kernel text mappings for the large page aligned text and rodata
 * sections will always be read-only. The kernel identity mappings
 * covering the holes caused by this alignment can be anything the user
 * asks for.
 *
 * This will preserve the large page mappings for kernel text/data at no
 * extra cost.
 */
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
	unsigned long t_start = (unsigned long)_text;
	unsigned int level;

	if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
		return 0;
	/*
	 * Don't enforce the !RW mapping for the kernel text mapping, if
	 * the current mapping is already using small page mapping. No
	 * need to work hard to preserve large page mappings in this case.
	 *
	 * This also fixes the Linux Xen paravirt guest boot failure caused
	 * by unexpected read-only mappings for kernel identity
	 * mappings. In this paravirt guest case, the kernel text mapping
	 * and the kernel identity mapping share the same page-table pages,
	 * so the protections for kernel text and identity mappings have to
	 * be the same.
	 */
	if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
		return _PAGE_RW;
	return 0;
}
#else
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	return 0;
}
#endif

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
				  unsigned long start, unsigned long end,
				  unsigned long pfn, const char *txt)
{
	static const char *lvltxt[] = {
		[CPA_CONFLICT]	= "conflict",
		[CPA_PROTECT]	= "protect",
		[CPA_DETECT]	= "detect",
	};

	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
		return;

	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
		(unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
					  unsigned long pfn, unsigned long npg,
					  unsigned long lpsize, int warnlvl)
{
	pgprotval_t forbidden, res;
	unsigned long end;

	/*
	 * There is no point in checking RW/NX conflicts when the requested
	 * mapping is setting the page !PRESENT.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		return prot;

	/* Operate on the virtual address */
	end = start + npg * PAGE_SIZE - 1;

	res = protect_kernel_text(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
	forbidden = res;

	/*
	 * Special case to preserve a large page. If the change spans the
	 * full large page mapping then there is no point in splitting it
	 * up. Happens with ftrace and is going to be removed once ftrace
	 * is switched to text_poke().
	 */
	if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
		res = protect_kernel_text_ro(start, end);
		check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
		forbidden |= res;
	}

	/* Check the PFN directly */
	res = protect_pci_bios(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
	forbidden |= res;

	res = protect_rodata(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
	forbidden |= res;

	return __pgprot(pgprot_val(prot) & ~forbidden);
}

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level)
{
	if (cpa->pgd)
		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
					     address, level);

	return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-PAE kernel work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
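/*
 * Illustrative use only: resolving the physical address of a percpu
 * variable, which may live in memory where __pa() is not valid:
 *
 *	phys_addr_t pa = slow_virt_to_phys(this_cpu_ptr(&some_percpu_var));
 *
 * (some_percpu_var is a hypothetical DEFINE_PER_CPU variable.)
 */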

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}

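/*
 * Decide whether the large page mapped by @kpte must be split for this CPA
 * request. Returns 1 when a split is required, 0 when the large page could
 * be preserved (possibly with updated protections) and a negative error
 * code for an unexpected mapping level. Called with pgd_lock held.
 */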
static int __should_split_large_page(pte_t *kpte, unsigned long address,
				     struct cpa_data *cpa)
{
	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
	pgprot_t old_prot, new_prot, req_prot, chk_prot;
	pte_t new_pte, *tmp;
	enum pg_level level;

	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte)
		return 1;

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		cpa_inc_2m_checked();
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		cpa_inc_1g_checked();
		break;
	default:
		return -EINVAL;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	lpaddr = (address + psize) & pmask;
	numpages = (lpaddr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
	 */

	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
	req_prot = pgprot_large_2_4k(old_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);
	req_prot = pgprot_clear_protnone_bits(req_prot);
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
		pgprot_val(req_prot) |= _PAGE_PSE;

	/*
	 * old_pfn points to the large page base pfn. So we need to add the
	 * offset of the virtual address:
	 */
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	/*
	 * Calculate the large page base address and the number of 4K pages
	 * in the large page
	 */
	lpaddr = address & pmask;
	numpages = psize >> PAGE_SHIFT;

	/*
	 * Sanity check that the existing mapping is correct versus the static
	 * protections. static_protections() guards against !PRESENT, so no
	 * extra conditional required here.
	 */
	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
				      psize, CPA_CONFLICT);

	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
		/*
		 * Split the large page and tell the split code to
		 * enforce static protections.
		 */
		cpa->force_static_prot = 1;
		return 1;
	}

	/*
	 * Optimization: If the requested pgprot is the same as the current
	 * pgprot, then the large page can be preserved and no updates are
	 * required independent of alignment and length of the requested
	 * range. The above already established that the current pgprot is
	 * correct, which in consequence makes the requested pgprot correct
	 * as well if it is the same. The static protection scan below will
	 * not come to a different conclusion.
	 */
	if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
		cpa_inc_lp_sameprot(level);
		return 0;
	}

	/*
	 * If the requested range does not cover the full page, split it up
	 */
	if (address != lpaddr || cpa->numpages != numpages)
		return 1;

	/*
	 * Check whether the requested pgprot is conflicting with a static
	 * protection requirement in the large page.
	 */
	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
				      psize, CPA_DETECT);

	/*
	 * If there is a conflict, split the large page.
	 *
	 * There used to be a 4k wise evaluation trying really hard to
	 * preserve the large pages, but experimentation has shown, that this
	 * does not help at all. There might be corner cases which would
	 * preserve one large page occasionally, but it's really not worth the
	 * extra code and cycles for the common case.
	 */
	if (pgprot_val(req_prot) != pgprot_val(new_prot))
		return 1;

	/* All checks passed. Update the large page mapping. */
	new_pte = pfn_pte(old_pfn, new_prot);
	__set_pmd_pte(kpte, address, new_pte);
	cpa->flags |= CPA_FLUSHTLB;
	cpa_inc_lp_preserved(level);
	return 0;
}

static int should_split_large_page(pte_t *kpte, unsigned long address,
				   struct cpa_data *cpa)
{
	int do_split;

	if (cpa->force_split)
		return 1;

	spin_lock(&pgd_lock);
	do_split = __should_split_large_page(kpte, address, cpa);
	spin_unlock(&pgd_lock);

	return do_split;
}

static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
			  pgprot_t ref_prot, unsigned long address,
			  unsigned long size)
{
	unsigned int npg = PFN_DOWN(size);
	pgprot_t prot;

	/*
	 * If should_split_large_page() discovered an inconsistent mapping,
	 * remove the invalid protection in the split mapping.
	 */
	if (!cpa->force_static_prot)
		goto set;

	/* Hand in lpsize = 0 to enforce the protection mechanism */
	prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);

	if (pgprot_val(prot) == pgprot_val(ref_prot))
		goto set;

	/*
	 * If this is splitting a PMD, fix it up. PUD splits cannot be
	 * fixed trivially as that would require to rescan the newly
	 * installed PMD mappings after returning from split_large_page()
	 * so an eventual further split can allocate the necessary PTE
	 * pages. Warn for now and revisit it in case this actually
	 * happens.
	 */
	if (size == PAGE_SIZE)
		ref_prot = prot;
	else
		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
set:
	set_pte(pte, pfn_pte(pfn, ref_prot));
}

static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
{
	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
	pte_t *pbase = (pte_t *)page_address(base);
	unsigned int i, level;
	pgprot_t ref_prot;
	pte_t *tmp;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;
	}

	paravirt_alloc_pte(&init_mm, page_to_pfn(base));

	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
		/*
		 * Clear PSE (aka _PAGE_PAT) and move
		 * PAT bit to correct position.
		 */
		ref_prot = pgprot_large_2_4k(ref_prot);
		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
		lpaddr = address & PMD_MASK;
		lpinc = PAGE_SIZE;
		break;

	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		lpaddr = address & PUD_MASK;
		lpinc = PMD_SIZE;
		/*
		 * Clear the PSE flags if the PRESENT flag is not set
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
	}

	ref_prot = pgprot_clear_protnone_bits(ref_prot);

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = ref_pfn;
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);

	if (virt_addr_valid(address)) {
		unsigned long pfn = PFN_DOWN(__pa(address));

		if (pfn_range_is_mapped(pfn, pfn + 1))
			split_page_count(level);
	}

	/*
	 * Install the new, split up pagetable.
	 *
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Do a global flush tlb after splitting the large page
	 * and before we do the actual change page attribute in the PTE.
	 *
	 * Without this, we violate the TLB application note, that says:
	 * "The TLBs may contain both ordinary and large-page
	 *  translations for a 4-KByte range of linear addresses. This
	 *  may occur if software modifies the paging structures so that
	 *  the page size used for the address range changes. If the two
	 *  translations differ with respect to page frame or attributes
	 *  (e.g., permissions), processor behavior is undefined and may
	 *  be implementation-specific."
	 *
	 * We do this global tlb flush inside the cpa_lock, so that we
	 * don't allow any other cpu, with stale tlb entries change the
	 * page attribute in parallel, that also falls into the
	 * just split large page entry.
	 */
	flush_tlb_all();
	spin_unlock(&pgd_lock);

	return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
			    unsigned long address)
{
	struct page *base;

	if (!debug_pagealloc_enabled())
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc_enabled())
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	if (__split_large_page(cpa, kpte, address, base))
		__free_page(base);

	return 0;
}

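/*
 * Free a page table page if, and only if, every entry in it is clear, so
 * the unmap path below can prune now-empty levels of the hierarchy.
 */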
static bool try_to_free_pte_page(pte_t *pte)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (!pte_none(pte[i]))
			return false;

	free_page((unsigned long)pte);
	return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_none(pmd[i]))
			return false;

	free_page((unsigned long)pmd);
	return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (start < end) {
		set_pte(pte, __pte(0));

		start += PAGE_SIZE;
		pte++;
	}

	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
		pmd_clear(pmd);
		return true;
	}
	return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (unmap_pte_range(pmd, start, end))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_large(*pmd))
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if we haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
	pud_t *pud = pud_offset(p4d, start);

	/*
	 * Not on a GB page boundary?
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		unmap_pmd_range(pud, start, pre_end);

		start = pre_end;
		pud++;
	}

	/*
	 * Try to unmap in 1G chunks?
	 */
	while (end - start >= PUD_SIZE) {

		if (pud_large(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);

		start += PUD_SIZE;
		pud++;
	}

	/*
	 * 2M leftovers?
	 */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * No need to try to free the PUD page because we'll free it in
	 * populate_pgd's error path
	 */
}

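/*
 * Allocate a zeroed lower-level page table page and hook it up. Both
 * helpers return 0 on success and -1 when the allocation fails.
 */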
static int alloc_pte_page(pmd_t *pmd)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	if (!pte)
		return -1;

	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
	if (!pmd)
		return -1;

	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	return 0;
}

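/*
 * Fill num_pages PTEs starting at @start with 4K mappings of the
 * consecutive pfns tracked in cpa->pfn, using the given pgprot.
 */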
static void populate_pte(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, start);

	pgprot = pgprot_clear_protnone_bits(pgprot);

	while (num_pages-- && start < end) {
		set_pte(pte, pfn_pte(cpa->pfn, pgprot));

		start += PAGE_SIZE;
		cpa->pfn++;
		pte++;
	}
}

static long populate_pmd(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
	long cur_pages = 0;
	pmd_t *pmd;
	pgprot_t pmd_pgprot;

	/*
	 * Not on a 2M boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

		pre_end = min_t(unsigned long, pre_end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(unsigned int, num_pages, cur_pages);

		/*
		 * Need a PTE page?
		 */
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

		start = pre_end;
	}

	/*
	 * We mapped them all?
	 */
	if (num_pages == cur_pages)
		return cur_pages;

	pmd_pgprot = pgprot_4k_2_large(pgprot);

	while (end - start >= PMD_SIZE) {

		/*
		 * We cannot use a 1G page so allocate a PMD page if needed.
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		pmd = pmd_offset(pud, start);

		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
					canon_pgprot(pmd_pgprot))));

		start += PMD_SIZE;
		cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
		cur_pages += PMD_SIZE >> PAGE_SHIFT;
	}

	/*
	 * Map trailing 4K pages.
	 */
	if (start < end) {
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, end, num_pages - cur_pages,
			     pmd, pgprot);
	}
	return num_pages;
}

Kirill A. Shutemov45478332017-03-17 21:55:12 +03001313static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
1314 pgprot_t pgprot)
Borislav Petkov4b235382013-10-31 17:25:02 +01001315{
1316 pud_t *pud;
1317 unsigned long end;
Matt Fleminge535ec02016-09-20 14:26:21 +01001318 long cur_pages = 0;
Juergen Grossf5b28312014-11-03 14:02:02 +01001319 pgprot_t pud_pgprot;
Borislav Petkov4b235382013-10-31 17:25:02 +01001320
1321 end = start + (cpa->numpages << PAGE_SHIFT);
1322
1323 /*
1324 * Not on a 1G page boundary? Map everything up to it with
1325 * smaller pages.
1326 */
1327 if (start & (PUD_SIZE - 1)) {
1328 unsigned long pre_end;
1329 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
1330
1331 pre_end = min_t(unsigned long, end, next_page);
1332 cur_pages = (pre_end - start) >> PAGE_SHIFT;
1333 cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
1334
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001335 pud = pud_offset(p4d, start);
Borislav Petkov4b235382013-10-31 17:25:02 +01001336
1337 /*
1338 * Need a PMD page?
1339 */
1340 if (pud_none(*pud))
1341 if (alloc_pmd_page(pud))
1342 return -1;
1343
1344 cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
1345 pud, pgprot);
1346 if (cur_pages < 0)
1347 return cur_pages;
1348
1349 start = pre_end;
1350 }
1351
1352 /* We mapped them all? */
1353 if (cpa->numpages == cur_pages)
1354 return cur_pages;
1355
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001356 pud = pud_offset(p4d, start);
Juergen Grossf5b28312014-11-03 14:02:02 +01001357 pud_pgprot = pgprot_4k_2_large(pgprot);
Borislav Petkov4b235382013-10-31 17:25:02 +01001358
1359 /*
1360 * Map everything starting from the 1G boundary, possibly with 1G pages.
1361 */
Borislav Petkovb8291adc2016-03-29 17:41:58 +02001362 while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
Andi Kleen958f79b2018-08-07 15:09:39 -07001363 set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
1364 canon_pgprot(pud_pgprot))));
Borislav Petkov4b235382013-10-31 17:25:02 +01001365
1366 start += PUD_SIZE;
Matt Flemingedc3b912015-11-27 21:09:31 +00001367 cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
Borislav Petkov4b235382013-10-31 17:25:02 +01001368 cur_pages += PUD_SIZE >> PAGE_SHIFT;
1369 pud++;
1370 }
1371
1372 /* Map trailing leftover */
1373 if (start < end) {
Matt Fleminge535ec02016-09-20 14:26:21 +01001374 long tmp;
Borislav Petkov4b235382013-10-31 17:25:02 +01001375
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001376 pud = pud_offset(p4d, start);
Borislav Petkov4b235382013-10-31 17:25:02 +01001377 if (pud_none(*pud))
1378 if (alloc_pmd_page(pud))
1379 return -1;
1380
1381 tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
1382 pud, pgprot);
1383 if (tmp < 0)
1384 return cur_pages;
1385
1386 cur_pages += tmp;
1387 }
1388 return cur_pages;
1389}
Borislav Petkovf3f72962013-10-31 17:25:01 +01001390
1391/*
1392 * Restrictions for the kernel page table do not necessarily apply when
1393 * mapping in an alternate PGD.
1394 */
1395static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
1396{
1397 pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
Borislav Petkovf3f72962013-10-31 17:25:01 +01001398 pud_t *pud = NULL; /* shut up gcc */
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001399 p4d_t *p4d;
Borislav Petkov42a54772014-01-18 12:48:16 +01001400 pgd_t *pgd_entry;
Matt Fleminge535ec02016-09-20 14:26:21 +01001401 long ret;
Borislav Petkovf3f72962013-10-31 17:25:01 +01001402
1403 pgd_entry = cpa->pgd + pgd_index(addr);
1404
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001405 if (pgd_none(*pgd_entry)) {
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -08001406 p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001407 if (!p4d)
1408 return -1;
1409
1410 set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
1411 }
1412
Borislav Petkovf3f72962013-10-31 17:25:01 +01001413 /*
1414 * Allocate a PUD page and hand it down for mapping.
1415 */
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001416 p4d = p4d_offset(pgd_entry, addr);
1417 if (p4d_none(*p4d)) {
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -08001418 pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
Borislav Petkovf3f72962013-10-31 17:25:01 +01001419 if (!pud)
1420 return -1;
Andy Lutomirski530dd8d2016-07-22 21:58:08 -07001421
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001422 set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
Borislav Petkovf3f72962013-10-31 17:25:01 +01001423 }
1424
1425 pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
1426 pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
1427
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001428 ret = populate_pud(cpa, addr, p4d, pgprot);
Borislav Petkov0bb8aee2013-10-31 17:25:05 +01001429 if (ret < 0) {
Andy Lutomirski55920d32016-07-23 09:59:28 -07001430 /*
1431 * Leave the PUD page in place in case some other CPU or thread
1432 * already found it, but remove any useless entries we just
1433 * added to it.
1434 */
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001435 unmap_pud_range(p4d, addr,
Borislav Petkov0bb8aee2013-10-31 17:25:05 +01001436 addr + (cpa->numpages << PAGE_SHIFT));
Borislav Petkov0bb8aee2013-10-31 17:25:05 +01001437 return ret;
1438 }
Borislav Petkov42a54772014-01-18 12:48:16 +01001439
Borislav Petkovf3f72962013-10-31 17:25:01 +01001440 cpa->numpages = ret;
1441 return 0;
1442}
1443
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001444static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1445 int primary)
1446{
Matt Fleming7fc84422016-04-25 21:06:35 +01001447 if (cpa->pgd) {
1448 /*
1449 * Right now, we only execute this code path when mapping
1450 * the EFI virtual memory map regions; no other users
1451 * provide a ->pgd value. This may change in the future.
1452 */
Borislav Petkov82f07122013-10-31 17:25:07 +01001453 return populate_pgd(cpa, vaddr);
Matt Fleming7fc84422016-04-25 21:06:35 +01001454 }
Borislav Petkov82f07122013-10-31 17:25:07 +01001455
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001456 /*
1457 * Ignore all non-primary paths.
1458 */
Jan Beulich405e11332016-02-10 02:03:00 -07001459 if (!primary) {
1460 cpa->numpages = 1;
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001461 return 0;
Jan Beulich405e11332016-02-10 02:03:00 -07001462 }
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001463
1464 /*
1465 * Ignore the NULL PTE for the kernel identity mapping, as it is expected
1466 * to have holes.
1467 * Also set numpages to '1', indicating that we processed the CPA request for
1468 * one virtual address page and its pfn. TBD: numpages can be set based
1469 * on the initial value and the level returned by lookup_address().
1470 */
1471 if (within(vaddr, PAGE_OFFSET,
1472 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
1473 cpa->numpages = 1;
1474 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1475 return 0;
Dave Hansen58e65b52018-04-20 15:20:21 -07001476
1477 } else if (__cpa_pfn_in_highmap(cpa->pfn)) {
1478 /* Faults in the highmap are OK, so do not warn: */
1479 return -EFAULT;
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001480 } else {
1481 WARN(1, KERN_WARNING "CPA: called for zero pte. "
1482 "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
1483 *cpa->vaddr);
1484
1485 return -EFAULT;
1486 }
1487}
1488
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001489static int __change_page_attr(struct cpa_data *cpa, int primary)
Ingo Molnar9f4c8152008-01-30 13:33:41 +01001490{
Shaohua Lid75586a2008-08-21 10:46:06 +08001491 unsigned long address;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +01001492 int do_split, err;
1493 unsigned int level;
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001494 pte_t *kpte, old_pte;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495
Peter Zijlstra16ebf032018-12-03 18:03:46 +01001496 address = __cpa_addr(cpa, cpa->curpage);
Ingo Molnar97f99fe2008-01-30 13:33:55 +01001497repeat:
Borislav Petkov82f07122013-10-31 17:25:07 +01001498 kpte = _lookup_address_cpa(cpa, address, &level);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 if (!kpte)
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001500 return __cpa_process_fault(cpa, address, primary);
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001501
1502 old_pte = *kpte;
Dave Hansendcb32d92016-07-07 17:19:15 -07001503 if (pte_none(old_pte))
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001504 return __cpa_process_fault(cpa, address, primary);
Ingo Molnar9f4c8152008-01-30 13:33:41 +01001505
Thomas Gleixner30551bb2008-01-30 13:34:04 +01001506 if (level == PG_LEVEL_4K) {
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001507 pte_t new_pte;
Arjan van de Ven626c2c92008-02-04 16:48:05 +01001508 pgprot_t new_prot = pte_pgprot(old_pte);
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001509 unsigned long pfn = pte_pfn(old_pte);
Thomas Gleixnera72a08a2008-01-30 13:34:07 +01001510
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001511 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
1512 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
Ingo Molnar86f03982008-01-30 13:34:09 +01001513
Thomas Gleixner5c280cf2018-09-17 16:29:12 +02001514 cpa_inc_4k_install();
Thomas Gleixner7af01452019-08-29 00:31:34 +02001515 /* Hand in lpsize = 0 to enforce the protection mechanism */
1516 new_prot = static_protections(new_prot, address, pfn, 1, 0,
Thomas Gleixner40464602018-09-17 16:29:11 +02001517 CPA_PROTECT);
Ingo Molnar86f03982008-01-30 13:34:09 +01001518
Dave Hansend1440b22018-04-06 13:55:02 -07001519 new_prot = pgprot_clear_protnone_bits(new_prot);
Andrea Arcangelia8aed3e2013-02-22 15:11:51 -08001520
1521 /*
Arjan van de Ven626c2c92008-02-04 16:48:05 +01001522 * We need to keep the pfn from the existing PTE;
1523 * after all, we're only going to change its attributes,
1524 * not the memory it points to.
1525 */
Dave Hansen1a544202018-04-06 13:55:11 -07001526 new_pte = pfn_pte(pfn, new_prot);
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001527 cpa->pfn = pfn;
Thomas Gleixnerf4ae5da2008-02-04 16:48:07 +01001528 /*
1529 * Do we really change anything ?
1530 */
1531 if (pte_val(old_pte) != pte_val(new_pte)) {
1532 set_pte_atomic(kpte, new_pte);
Shaohua Lid75586a2008-08-21 10:46:06 +08001533 cpa->flags |= CPA_FLUSHTLB;
Thomas Gleixnerf4ae5da2008-02-04 16:48:07 +01001534 }
Rafael J. Wysocki9b5cf482008-03-03 01:17:37 +01001535 cpa->numpages = 1;
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001536 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 }
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001538
1539 /*
1540 * Check whether we can keep the large page intact
1541 * and just change the pte:
1542 */
Thomas Gleixner8679de02018-09-17 16:29:08 +02001543 do_split = should_split_large_page(kpte, address, cpa);
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001544 /*
1545 * When the range fits into the existing large page,
Rafael J. Wysocki9b5cf482008-03-03 01:17:37 +01001546 * return. cpa->numpages and cpa->flags have been updated in
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001547 * should_split_large_page():
1548 */
Ingo Molnar87f7f8f2008-02-04 16:48:10 +01001549 if (do_split <= 0)
1550 return do_split;
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001551
1552 /*
1553 * We have to split the large page:
1554 */
Borislav Petkov82f07122013-10-31 17:25:07 +01001555 err = split_large_page(cpa, kpte, address);
Peter Zijlstrac0a759a2018-09-19 10:50:18 +02001556 if (!err)
Ingo Molnar87f7f8f2008-02-04 16:48:10 +01001557 goto repeat;
Ingo Molnarbeaff632008-02-04 16:48:09 +01001558
Ingo Molnar87f7f8f2008-02-04 16:48:10 +01001559 return err;
Ingo Molnar9f4c8152008-01-30 13:33:41 +01001560}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001562static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
1563
1564static int cpa_process_alias(struct cpa_data *cpa)
Ingo Molnar44af6c42008-01-30 13:34:03 +01001565{
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001566 struct cpa_data alias_cpa;
Tejun Heo992f4c12009-06-22 11:56:24 +09001567 unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
Tejun Heoe933a732009-08-14 15:00:53 +09001568 unsigned long vaddr;
Tejun Heo992f4c12009-06-22 11:56:24 +09001569 int ret;
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001570
Yinghai Lu8eb57792012-11-16 19:38:49 -08001571 if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001572 return 0;
1573
Thomas Gleixnerf34b4392008-02-15 22:17:57 +01001574 /*
1575 * No need to redo when the primary call already touched the direct
1576 * mapping:
1577 */
Peter Zijlstra16ebf032018-12-03 18:03:46 +01001578 vaddr = __cpa_addr(cpa, cpa->curpage);
Shaohua Lid75586a2008-08-21 10:46:06 +08001579 if (!(within(vaddr, PAGE_OFFSET,
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001580 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001581
Thomas Gleixnerf34b4392008-02-15 22:17:57 +01001582 alias_cpa = *cpa;
Tejun Heo992f4c12009-06-22 11:56:24 +09001583 alias_cpa.vaddr = &laddr;
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001584 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
Peter Zijlstra98bfc9b2018-12-03 18:03:47 +01001585 alias_cpa.curpage = 0;
Shaohua Lid75586a2008-08-21 10:46:06 +08001586
Thomas Gleixnerf34b4392008-02-15 22:17:57 +01001587 ret = __change_page_attr_set_clr(&alias_cpa, 0);
Tejun Heo992f4c12009-06-22 11:56:24 +09001588 if (ret)
1589 return ret;
Thomas Gleixnerf34b4392008-02-15 22:17:57 +01001590 }
Ingo Molnar44af6c42008-01-30 13:34:03 +01001591
Arjan van de Ven488fd992008-01-30 13:34:07 +01001592#ifdef CONFIG_X86_64
Thomas Gleixner08797502008-01-30 13:34:09 +01001593 /*
Tejun Heo992f4c12009-06-22 11:56:24 +09001594 * If the primary call didn't touch the high mapping already
1595 * and the physical address is inside the kernel map, we need
Thomas Gleixner08797502008-01-30 13:34:09 +01001596 * to touch the high mapped kernel as well:
1597 */
Tejun Heo992f4c12009-06-22 11:56:24 +09001598 if (!within(vaddr, (unsigned long)_text, _brk_end) &&
Dave Hansen58e65b52018-04-20 15:20:21 -07001599 __cpa_pfn_in_highmap(cpa->pfn)) {
Tejun Heo992f4c12009-06-22 11:56:24 +09001600 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1601 __START_KERNEL_map - phys_base;
1602 alias_cpa = *cpa;
1603 alias_cpa.vaddr = &temp_cpa_vaddr;
1604 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
Peter Zijlstra98bfc9b2018-12-03 18:03:47 +01001605 alias_cpa.curpage = 0;
Thomas Gleixner08797502008-01-30 13:34:09 +01001606
Tejun Heo992f4c12009-06-22 11:56:24 +09001607 /*
1608 * The high mapping range is imprecise, so ignore the
1609 * return value.
1610 */
1611 __change_page_attr_set_clr(&alias_cpa, 0);
1612 }
Thomas Gleixner08797502008-01-30 13:34:09 +01001613#endif
Tejun Heo992f4c12009-06-22 11:56:24 +09001614
1615 return 0;
Ingo Molnar44af6c42008-01-30 13:34:03 +01001616}
1617
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001618static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
Thomas Gleixnerff314522008-01-30 13:34:08 +01001619{
Matt Fleminge535ec02016-09-20 14:26:21 +01001620 unsigned long numpages = cpa->numpages;
Peter Zijlstra83b4e392018-12-03 18:03:50 +01001621 unsigned long rempages = numpages;
1622 int ret = 0;
Thomas Gleixnerff314522008-01-30 13:34:08 +01001623
Peter Zijlstra83b4e392018-12-03 18:03:50 +01001624 while (rempages) {
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001625 /*
1626 * Store the remaining nr of pages for the large page
1627 * preservation check.
1628 */
Peter Zijlstra83b4e392018-12-03 18:03:50 +01001629 cpa->numpages = rempages;
Shaohua Lid75586a2008-08-21 10:46:06 +08001630 /* For array changes, we cannot use large pages */
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001631 if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
Shaohua Lid75586a2008-08-21 10:46:06 +08001632 cpa->numpages = 1;
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001633
Christian Borntraeger288cf3c2016-03-15 14:57:33 -07001634 if (!debug_pagealloc_enabled())
Suresh Siddhaad5ca552008-09-23 14:00:42 -07001635 spin_lock(&cpa_lock);
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001636 ret = __change_page_attr(cpa, checkalias);
Christian Borntraeger288cf3c2016-03-15 14:57:33 -07001637 if (!debug_pagealloc_enabled())
Suresh Siddhaad5ca552008-09-23 14:00:42 -07001638 spin_unlock(&cpa_lock);
Thomas Gleixnerff314522008-01-30 13:34:08 +01001639 if (ret)
Peter Zijlstra83b4e392018-12-03 18:03:50 +01001640 goto out;
Thomas Gleixnerff314522008-01-30 13:34:08 +01001641
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001642 if (checkalias) {
1643 ret = cpa_process_alias(cpa);
1644 if (ret)
Peter Zijlstra83b4e392018-12-03 18:03:50 +01001645 goto out;
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001646 }
1647
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001648 /*
1649 * Adjust the number of pages with the result of the
1650 * CPA operation. Either a large page has been
1651 * preserved or a single page update happened.
1652 */
Peter Zijlstra83b4e392018-12-03 18:03:50 +01001653 BUG_ON(cpa->numpages > rempages || !cpa->numpages);
1654 rempages -= cpa->numpages;
Peter Zijlstra98bfc9b2018-12-03 18:03:47 +01001655 cpa->curpage += cpa->numpages;
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001656 }
Peter Zijlstra83b4e392018-12-03 18:03:50 +01001657
1658out:
1659 /* Restore the original numpages */
1660 cpa->numpages = numpages;
1661 return ret;
Thomas Gleixnerff314522008-01-30 13:34:08 +01001662}
1663
Shaohua Lid75586a2008-08-21 10:46:06 +08001664static int change_page_attr_set_clr(unsigned long *addr, int numpages,
Andi Kleenc9caa022008-03-12 03:53:29 +01001665 pgprot_t mask_set, pgprot_t mask_clr,
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001666 int force_split, int in_flag,
1667 struct page **pages)
Thomas Gleixnerff314522008-01-30 13:34:08 +01001668{
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001669 struct cpa_data cpa;
Ingo Molnarcacf8902008-08-21 13:46:33 +02001670 int ret, cache, checkalias;
Thomas Gleixner331e4062008-02-04 16:48:06 +01001671
Borislav Petkov82f07122013-10-31 17:25:07 +01001672 memset(&cpa, 0, sizeof(cpa));
1673
Thomas Gleixner331e4062008-02-04 16:48:06 +01001674 /*
Dave Hansen39114b72018-04-06 13:55:17 -07001675 * Check if we are requested to set an unsupported
1676 * feature. Clearing unsupported features is OK.
Thomas Gleixner331e4062008-02-04 16:48:06 +01001677 */
1678 mask_set = canon_pgprot(mask_set);
Dave Hansen39114b72018-04-06 13:55:17 -07001679
Andi Kleenc9caa022008-03-12 03:53:29 +01001680 if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
Thomas Gleixner331e4062008-02-04 16:48:06 +01001681 return 0;
1682
Thomas Gleixner69b14152008-02-13 11:04:50 +01001683 /* Ensure we are PAGE_SIZE aligned */
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001684 if (in_flag & CPA_ARRAY) {
Shaohua Lid75586a2008-08-21 10:46:06 +08001685 int i;
1686 for (i = 0; i < numpages; i++) {
1687 if (addr[i] & ~PAGE_MASK) {
1688 addr[i] &= PAGE_MASK;
1689 WARN_ON_ONCE(1);
1690 }
1691 }
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001692 } else if (!(in_flag & CPA_PAGES_ARRAY)) {
1693 /*
1694 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
Ingo Molnara97673a2018-12-03 10:47:34 +01001695 * No need to check in that case.
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001696 */
1697 if (*addr & ~PAGE_MASK) {
1698 *addr &= PAGE_MASK;
1699 /*
1700 * People should not be passing in unaligned addresses:
1701 */
1702 WARN_ON_ONCE(1);
1703 }
Thomas Gleixner69b14152008-02-13 11:04:50 +01001704 }
1705
Nick Piggin5843d9a2008-08-01 03:15:21 +02001706 /* Must avoid aliasing mappings in the highmem code */
1707 kmap_flush_unused();
1708
Nick Piggindb64fe02008-10-18 20:27:03 -07001709 vm_unmap_aliases();
1710
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001711 cpa.vaddr = addr;
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001712 cpa.pages = pages;
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001713 cpa.numpages = numpages;
1714 cpa.mask_set = mask_set;
1715 cpa.mask_clr = mask_clr;
Shaohua Lid75586a2008-08-21 10:46:06 +08001716 cpa.flags = 0;
1717 cpa.curpage = 0;
Andi Kleenc9caa022008-03-12 03:53:29 +01001718 cpa.force_split = force_split;
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001719
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001720 if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
1721 cpa.flags |= in_flag;
Shaohua Lid75586a2008-08-21 10:46:06 +08001722
Thomas Gleixneraf96e442008-02-15 21:49:46 +01001723 /* No alias checking for _NX bit modifications */
1724 checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
Dave Hansenc40a56a2018-08-02 15:58:31 -07001725 /* Has caller explicitly disabled alias checking? */
1726 if (in_flag & CPA_NO_CHECK_ALIAS)
1727 checkalias = 0;
Thomas Gleixneraf96e442008-02-15 21:49:46 +01001728
1729 ret = __change_page_attr_set_clr(&cpa, checkalias);
Thomas Gleixnerff314522008-01-30 13:34:08 +01001730
Thomas Gleixner57a6a462008-01-30 13:34:08 +01001731 /*
Thomas Gleixnerf4ae5da2008-02-04 16:48:07 +01001732 * Check whether we really changed something:
1733 */
Shaohua Lid75586a2008-08-21 10:46:06 +08001734 if (!(cpa.flags & CPA_FLUSHTLB))
Shaohua Li1ac2f7d2008-08-04 14:51:24 +08001735 goto out;
Ingo Molnarcacf8902008-08-21 13:46:33 +02001736
Thomas Gleixnerf4ae5da2008-02-04 16:48:07 +01001737 /*
Andi Kleen6bb83832008-02-04 16:48:06 +01001738 * No need to flush when we did not set any of the caching
1739 * attributes:
1740 */
Juergen Grossc06814d2014-11-03 14:01:57 +01001741 cache = !!pgprot2cachemode(mask_set);
Andi Kleen6bb83832008-02-04 16:48:06 +01001742
1743 /*
Peter Zijlstrafce2ce92018-09-19 10:50:22 +02001744 * On error, flush everything to be sure.
Thomas Gleixner57a6a462008-01-30 13:34:08 +01001745 */
Peter Zijlstrafce2ce92018-09-19 10:50:22 +02001746 if (ret) {
Andi Kleen6bb83832008-02-04 16:48:06 +01001747 cpa_flush_all(cache);
Peter Zijlstrafce2ce92018-09-19 10:50:22 +02001748 goto out;
1749 }
1750
Peter Zijlstrafe0937b2018-12-03 18:03:51 +01001751 cpa_flush(&cpa, cache);
Thomas Gleixner76ebd052008-02-09 23:24:09 +01001752out:
Thomas Gleixnerff314522008-01-30 13:34:08 +01001753 return ret;
1754}
1755
Shaohua Lid75586a2008-08-21 10:46:06 +08001756static inline int change_page_attr_set(unsigned long *addr, int numpages,
1757 pgprot_t mask, int array)
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001758{
Shaohua Lid75586a2008-08-21 10:46:06 +08001759 return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001760 (array ? CPA_ARRAY : 0), NULL);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001761}
1762
Shaohua Lid75586a2008-08-21 10:46:06 +08001763static inline int change_page_attr_clear(unsigned long *addr, int numpages,
1764 pgprot_t mask, int array)
Thomas Gleixner72932c72008-01-30 13:34:08 +01001765{
Shaohua Lid75586a2008-08-21 10:46:06 +08001766 return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001767 (array ? CPA_ARRAY : 0), NULL);
Thomas Gleixner72932c72008-01-30 13:34:08 +01001768}
1769
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07001770static inline int cpa_set_pages_array(struct page **pages, int numpages,
1771 pgprot_t mask)
1772{
1773 return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
1774 CPA_PAGES_ARRAY, pages);
1775}
1776
1777static inline int cpa_clear_pages_array(struct page **pages, int numpages,
1778 pgprot_t mask)
1779{
1780 return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
1781 CPA_PAGES_ARRAY, pages);
1782}
1783
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001784int _set_memory_uc(unsigned long addr, int numpages)
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001785{
Suresh Siddhade33c442008-04-25 17:07:22 -07001786 /*
1787 * For now, UC MINUS; see comments in ioremap_nocache().
Luis R. Rodrigueze4b6be332015-05-11 10:15:53 +02001788 * If you really need strong UC use ioremap_uc(), but note
1789 * that you cannot override IO areas with set_memory_*() as
1790 * these helpers cannot work with IO memory.
Suresh Siddhade33c442008-04-25 17:07:22 -07001791 */
Shaohua Lid75586a2008-08-21 10:46:06 +08001792 return change_page_attr_set(&addr, numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01001793 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1794 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001795}
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001796
1797int set_memory_uc(unsigned long addr, int numpages)
1798{
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001799 int ret;
1800
Suresh Siddhade33c442008-04-25 17:07:22 -07001801 /*
1802 * For now, UC MINUS; see comments in ioremap_nocache().
1803 */
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001804 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
Juergen Grosse00c8cc2014-11-03 14:01:59 +01001805 _PAGE_CACHE_MODE_UC_MINUS, NULL);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001806 if (ret)
1807 goto out_err;
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001808
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001809 ret = _set_memory_uc(addr, numpages);
1810 if (ret)
1811 goto out_free;
1812
1813 return 0;
1814
1815out_free:
1816 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1817out_err:
1818 return ret;
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001819}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001820EXPORT_SYMBOL(set_memory_uc);
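/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a driver that wants an uncached view of normal RAM might do:
 *
 *	unsigned long vaddr = __get_free_pages(GFP_KERNEL, 2);	// 4 pages
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	if (set_memory_uc(vaddr, 4))	// reserves the memtype, then UC-
 *		goto err_free;
 *	...
 *	set_memory_wb(vaddr, 4);	// restore WB before freeing
 *	free_pages(vaddr, 2);
 *
 * Note that this works only on RAM; IO ranges need ioremap_uc() instead.
 */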
1821
Peter Zijlstra3c567352018-12-03 18:03:53 +01001822static int _set_memory_array(unsigned long *addr, int numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01001823 enum page_cache_mode new_type)
Shaohua Lid75586a2008-08-21 10:46:06 +08001824{
Toshi Kani623dffb2015-06-04 18:55:20 +02001825 enum page_cache_mode set_type;
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001826 int i, j;
1827 int ret;
1828
Peter Zijlstra3c567352018-12-03 18:03:53 +01001829 for (i = 0; i < numpages; i++) {
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001830 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
Pauli Nieminen4f646252010-04-01 12:45:01 +00001831 new_type, NULL);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001832 if (ret)
1833 goto out_free;
Shaohua Lid75586a2008-08-21 10:46:06 +08001834 }
1835
Toshi Kani623dffb2015-06-04 18:55:20 +02001836 /* If WC, set to UC- first and then WC */
1837 set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
1838 _PAGE_CACHE_MODE_UC_MINUS : new_type;
1839
Peter Zijlstra3c567352018-12-03 18:03:53 +01001840 ret = change_page_attr_set(addr, numpages,
Toshi Kani623dffb2015-06-04 18:55:20 +02001841 cachemode2pgprot(set_type), 1);
Pauli Nieminen4f646252010-04-01 12:45:01 +00001842
Juergen Grossc06814d2014-11-03 14:01:57 +01001843 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
Peter Zijlstra3c567352018-12-03 18:03:53 +01001844 ret = change_page_attr_set_clr(addr, numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01001845 cachemode2pgprot(
1846 _PAGE_CACHE_MODE_WC),
Pauli Nieminen4f646252010-04-01 12:45:01 +00001847 __pgprot(_PAGE_CACHE_MASK),
1848 0, CPA_ARRAY, NULL);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001849 if (ret)
1850 goto out_free;
Rene Hermanc5e147c2008-08-22 01:02:20 +02001851
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001852 return 0;
1853
1854out_free:
1855 for (j = 0; j < i; j++)
1856 free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
1857
1858 return ret;
Shaohua Lid75586a2008-08-21 10:46:06 +08001859}
Pauli Nieminen4f646252010-04-01 12:45:01 +00001860
Peter Zijlstra3c567352018-12-03 18:03:53 +01001861int set_memory_array_uc(unsigned long *addr, int numpages)
Pauli Nieminen4f646252010-04-01 12:45:01 +00001862{
Peter Zijlstra3c567352018-12-03 18:03:53 +01001863 return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_UC_MINUS);
Pauli Nieminen4f646252010-04-01 12:45:01 +00001864}
Shaohua Lid75586a2008-08-21 10:46:06 +08001865EXPORT_SYMBOL(set_memory_array_uc);
1866
Peter Zijlstra3c567352018-12-03 18:03:53 +01001867int set_memory_array_wc(unsigned long *addr, int numpages)
Pauli Nieminen4f646252010-04-01 12:45:01 +00001868{
Peter Zijlstra3c567352018-12-03 18:03:53 +01001869 return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WC);
Pauli Nieminen4f646252010-04-01 12:45:01 +00001870}
1871EXPORT_SYMBOL(set_memory_array_wc);
1872
Peter Zijlstra3c567352018-12-03 18:03:53 +01001873int set_memory_array_wt(unsigned long *addr, int numpages)
Toshi Kani623dffb2015-06-04 18:55:20 +02001874{
Peter Zijlstra3c567352018-12-03 18:03:53 +01001875 return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WT);
Toshi Kani623dffb2015-06-04 18:55:20 +02001876}
1877EXPORT_SYMBOL_GPL(set_memory_array_wt);
1878
venkatesh.pallipadi@intel.comef354af2008-03-18 17:00:23 -07001879int _set_memory_wc(unsigned long addr, int numpages)
1880{
venkatesh.pallipadi@intel.com3869c4a2009-04-09 14:26:50 -07001881 int ret;
Pallipadi, Venkateshbdc63402009-07-30 14:43:19 -07001882
venkatesh.pallipadi@intel.com3869c4a2009-04-09 14:26:50 -07001883 ret = change_page_attr_set(&addr, numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01001884 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1885 0);
venkatesh.pallipadi@intel.com3869c4a2009-04-09 14:26:50 -07001886 if (!ret) {
Peter Zijlstra5fe26b72018-12-03 18:03:48 +01001887 ret = change_page_attr_set_clr(&addr, numpages,
1888 cachemode2pgprot(_PAGE_CACHE_MODE_WC),
Pallipadi, Venkateshbdc63402009-07-30 14:43:19 -07001889 __pgprot(_PAGE_CACHE_MASK),
1890 0, 0, NULL);
venkatesh.pallipadi@intel.com3869c4a2009-04-09 14:26:50 -07001891 }
1892 return ret;
venkatesh.pallipadi@intel.comef354af2008-03-18 17:00:23 -07001893}
1894
1895int set_memory_wc(unsigned long addr, int numpages)
1896{
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001897 int ret;
1898
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001899 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
Juergen Grosse00c8cc2014-11-03 14:01:59 +01001900 _PAGE_CACHE_MODE_WC, NULL);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001901 if (ret)
Toshi Kani623dffb2015-06-04 18:55:20 +02001902 return ret;
venkatesh.pallipadi@intel.comef354af2008-03-18 17:00:23 -07001903
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001904 ret = _set_memory_wc(addr, numpages);
1905 if (ret)
Toshi Kani623dffb2015-06-04 18:55:20 +02001906 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001907
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001908 return ret;
venkatesh.pallipadi@intel.comef354af2008-03-18 17:00:23 -07001909}
1910EXPORT_SYMBOL(set_memory_wc);
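/*
 * Usage sketch (editor's illustration): write-combining is the typical
 * choice for software-rendered frame buffers held in normal RAM; 'buf'
 * and 'npages' are hypothetical driver variables:
 *
 *	int err = set_memory_wc((unsigned long)buf, npages);
 *
 *	if (err)
 *		return err;
 *	...
 *	set_memory_wb((unsigned long)buf, npages);	// undo before freeing
 *
 * Internally this reserves the WC memtype, sets UC- first and only then
 * upgrades to WC, mirroring the _set_memory_wc() logic above.
 */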
1911
Toshi Kani623dffb2015-06-04 18:55:20 +02001912int _set_memory_wt(unsigned long addr, int numpages)
1913{
1914 return change_page_attr_set(&addr, numpages,
1915 cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
1916}
1917
1918int set_memory_wt(unsigned long addr, int numpages)
1919{
1920 int ret;
1921
1922 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1923 _PAGE_CACHE_MODE_WT, NULL);
1924 if (ret)
1925 return ret;
1926
1927 ret = _set_memory_wt(addr, numpages);
1928 if (ret)
1929 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1930
1931 return ret;
1932}
1933EXPORT_SYMBOL_GPL(set_memory_wt);
1934
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001935int _set_memory_wb(unsigned long addr, int numpages)
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001936{
Juergen Grossc06814d2014-11-03 14:01:57 +01001937 /* WB cache mode is hard wired to all cache attribute bits being 0 */
Shaohua Lid75586a2008-08-21 10:46:06 +08001938 return change_page_attr_clear(&addr, numpages,
1939 __pgprot(_PAGE_CACHE_MASK), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001940}
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001941
1942int set_memory_wb(unsigned long addr, int numpages)
1943{
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001944 int ret;
1945
1946 ret = _set_memory_wb(addr, numpages);
1947 if (ret)
1948 return ret;
1949
venkatesh.pallipadi@intel.comc15238d2008-08-20 16:45:51 -07001950 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001951 return 0;
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001952}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001953EXPORT_SYMBOL(set_memory_wb);
1954
Peter Zijlstra3c567352018-12-03 18:03:53 +01001955int set_memory_array_wb(unsigned long *addr, int numpages)
Shaohua Lid75586a2008-08-21 10:46:06 +08001956{
1957 int i;
venkatesh.pallipadi@intel.coma5593e02009-04-09 14:26:48 -07001958 int ret;
1959
Juergen Grossc06814d2014-11-03 14:01:57 +01001960 /* WB cache mode is hard wired to all cache attribute bits being 0 */
Peter Zijlstra3c567352018-12-03 18:03:53 +01001961 ret = change_page_attr_clear(addr, numpages,
venkatesh.pallipadi@intel.coma5593e02009-04-09 14:26:48 -07001962 __pgprot(_PAGE_CACHE_MASK), 1);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001963 if (ret)
1964 return ret;
Shaohua Lid75586a2008-08-21 10:46:06 +08001965
Peter Zijlstra3c567352018-12-03 18:03:53 +01001966 for (i = 0; i < numpages; i++)
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001967 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
Rene Hermanc5e147c2008-08-22 01:02:20 +02001968
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001969 return 0;
Shaohua Lid75586a2008-08-21 10:46:06 +08001970}
1971EXPORT_SYMBOL(set_memory_array_wb);
1972
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001973int set_memory_x(unsigned long addr, int numpages)
1974{
H. Peter Anvin583140a2009-11-13 15:28:15 -08001975 if (!(__supported_pte_mask & _PAGE_NX))
1976 return 0;
1977
Shaohua Lid75586a2008-08-21 10:46:06 +08001978 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001979}
1980EXPORT_SYMBOL(set_memory_x);
1981
1982int set_memory_nx(unsigned long addr, int numpages)
1983{
H. Peter Anvin583140a2009-11-13 15:28:15 -08001984 if (!(__supported_pte_mask & _PAGE_NX))
1985 return 0;
1986
Shaohua Lid75586a2008-08-21 10:46:06 +08001987 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001988}
1989EXPORT_SYMBOL(set_memory_nx);
1990
1991int set_memory_ro(unsigned long addr, int numpages)
1992{
Shaohua Lid75586a2008-08-21 10:46:06 +08001993 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001994}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001995
1996int set_memory_rw(unsigned long addr, int numpages)
1997{
Shaohua Lid75586a2008-08-21 10:46:06 +08001998 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001999}
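/*
 * Usage sketch (editor's illustration): sealing a table read-only after
 * init and briefly reopening it for an update; 'table', 'npages' and the
 * 'entry' field are made-up names:
 *
 *	set_memory_ro((unsigned long)table, npages);
 *	...
 *	set_memory_rw((unsigned long)table, npages);
 *	table->entry = new_value;
 *	set_memory_ro((unsigned long)table, npages);
 */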
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002000
2001int set_memory_np(unsigned long addr, int numpages)
2002{
Shaohua Lid75586a2008-08-21 10:46:06 +08002003 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002004}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002005
Dave Hansenc40a56a2018-08-02 15:58:31 -07002006int set_memory_np_noalias(unsigned long addr, int numpages)
2007{
2008 int cpa_flags = CPA_NO_CHECK_ALIAS;
2009
2010 return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
2011 __pgprot(_PAGE_PRESENT), 0,
2012 cpa_flags, NULL);
2013}
2014
Andi Kleenc9caa022008-03-12 03:53:29 +01002015int set_memory_4k(unsigned long addr, int numpages)
2016{
Shaohua Lid75586a2008-08-21 10:46:06 +08002017 return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07002018 __pgprot(0), 1, 0, NULL);
Andi Kleenc9caa022008-03-12 03:53:29 +01002019}
2020
Dave Hansen39114b72018-04-06 13:55:17 -07002021int set_memory_nonglobal(unsigned long addr, int numpages)
2022{
2023 return change_page_attr_clear(&addr, numpages,
2024 __pgprot(_PAGE_GLOBAL), 0);
2025}
2026
Dave Hanseneac70732018-08-02 15:58:25 -07002027int set_memory_global(unsigned long addr, int numpages)
2028{
2029 return change_page_attr_set(&addr, numpages,
2030 __pgprot(_PAGE_GLOBAL), 0);
2031}
2032
Tom Lendacky77bd2342017-07-17 16:10:19 -05002033static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
2034{
2035 struct cpa_data cpa;
Tom Lendacky77bd2342017-07-17 16:10:19 -05002036 int ret;
2037
Tom Lendackya72ec5a2017-10-20 09:30:48 -05002038 /* Nothing to do if memory encryption is not active */
2039 if (!mem_encrypt_active())
Tom Lendacky77bd2342017-07-17 16:10:19 -05002040 return 0;
2041
2042 /* Should not be working on unaligned addresses */
2043 if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
2044 addr &= PAGE_MASK;
2045
Tom Lendacky77bd2342017-07-17 16:10:19 -05002046 memset(&cpa, 0, sizeof(cpa));
2047 cpa.vaddr = &addr;
2048 cpa.numpages = numpages;
2049 cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
2050 cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
2051 cpa.pgd = init_mm.pgd;
2052
2053 /* Must avoid aliasing mappings in the highmem code */
2054 kmap_flush_unused();
2055 vm_unmap_aliases();
2056
2057 /*
2058 * Before changing the encryption attribute, we need to flush caches.
2059 */
Peter Zijlstrafe0937b2018-12-03 18:03:51 +01002060 cpa_flush(&cpa, 1);
Tom Lendacky77bd2342017-07-17 16:10:19 -05002061
2062 ret = __change_page_attr_set_clr(&cpa, 1);
2063
2064 /*
Peter Zijlstrafe0937b2018-12-03 18:03:51 +01002065 * After changing the encryption attribute, we need to flush TLBs again
2066 * in case any speculative TLB caching occurred (but no need to flush
2067 * caches again). We could just use cpa_flush_all(), but in case TLB
2068 * flushing gets optimized in the cpa_flush() path, use the same logic
2069 * as above.
Tom Lendacky77bd2342017-07-17 16:10:19 -05002070 */
Peter Zijlstrafe0937b2018-12-03 18:03:51 +01002071 cpa_flush(&cpa, 0);
Tom Lendacky77bd2342017-07-17 16:10:19 -05002072
2073 return ret;
2074}
2075
2076int set_memory_encrypted(unsigned long addr, int numpages)
2077{
2078 return __set_memory_enc_dec(addr, numpages, true);
2079}
Tom Lendacky95cf9262017-07-17 16:10:26 -05002080EXPORT_SYMBOL_GPL(set_memory_encrypted);
Tom Lendacky77bd2342017-07-17 16:10:19 -05002081
2082int set_memory_decrypted(unsigned long addr, int numpages)
2083{
2084 return __set_memory_enc_dec(addr, numpages, false);
2085}
Tom Lendacky95cf9262017-07-17 16:10:26 -05002086EXPORT_SYMBOL_GPL(set_memory_decrypted);
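/*
 * Usage sketch (editor's illustration): under SME/SEV a guest must clear
 * the C-bit on any page it shares with the hypervisor:
 *
 *	unsigned long shared = get_zeroed_page(GFP_KERNEL);
 *
 *	if (!shared)
 *		return -ENOMEM;
 *	if (set_memory_decrypted(shared, 1))
 *		goto err_free;
 *	... hand __pa(shared) to the hypervisor ...
 *	set_memory_encrypted(shared, 1);	// re-encrypt before reuse
 *	free_page(shared);
 */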
Tom Lendacky77bd2342017-07-17 16:10:19 -05002087
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002088int set_pages_uc(struct page *page, int numpages)
2089{
2090 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002091
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002092 return set_memory_uc(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002093}
2094EXPORT_SYMBOL(set_pages_uc);
2095
Peter Zijlstra3c567352018-12-03 18:03:53 +01002096static int _set_pages_array(struct page **pages, int numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01002097 enum page_cache_mode new_type)
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002098{
2099 unsigned long start;
2100 unsigned long end;
Toshi Kani623dffb2015-06-04 18:55:20 +02002101 enum page_cache_mode set_type;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002102 int i;
2103 int free_idx;
Pauli Nieminen4f646252010-04-01 12:45:01 +00002104 int ret;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002105
Peter Zijlstra3c567352018-12-03 18:03:53 +01002106 for (i = 0; i < numpages; i++) {
Thomas Hellstrom8523acf2009-08-03 09:25:45 +02002107 if (PageHighMem(pages[i]))
2108 continue;
2109 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002110 end = start + PAGE_SIZE;
Pauli Nieminen4f646252010-04-01 12:45:01 +00002111 if (reserve_memtype(start, end, new_type, NULL))
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002112 goto err_out;
2113 }
2114
Toshi Kani623dffb2015-06-04 18:55:20 +02002115 /* If WC, set to UC- first and then WC */
2116 set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
2117 _PAGE_CACHE_MODE_UC_MINUS : new_type;
2118
Peter Zijlstra3c567352018-12-03 18:03:53 +01002119 ret = cpa_set_pages_array(pages, numpages,
Toshi Kani623dffb2015-06-04 18:55:20 +02002120 cachemode2pgprot(set_type));
Juergen Grossc06814d2014-11-03 14:01:57 +01002121 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
Peter Zijlstra3c567352018-12-03 18:03:53 +01002122 ret = change_page_attr_set_clr(NULL, numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01002123 cachemode2pgprot(
2124 _PAGE_CACHE_MODE_WC),
Pauli Nieminen4f646252010-04-01 12:45:01 +00002125 __pgprot(_PAGE_CACHE_MASK),
2126 0, CPA_PAGES_ARRAY, pages);
2127 if (ret)
2128 goto err_out;
2129 return 0; /* Success */
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002130err_out:
2131 free_idx = i;
2132 for (i = 0; i < free_idx; i++) {
Thomas Hellstrom8523acf2009-08-03 09:25:45 +02002133 if (PageHighMem(pages[i]))
2134 continue;
2135 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002136 end = start + PAGE_SIZE;
2137 free_memtype(start, end);
2138 }
2139 return -EINVAL;
2140}
Pauli Nieminen4f646252010-04-01 12:45:01 +00002141
Peter Zijlstra3c567352018-12-03 18:03:53 +01002142int set_pages_array_uc(struct page **pages, int numpages)
Pauli Nieminen4f646252010-04-01 12:45:01 +00002143{
Peter Zijlstra3c567352018-12-03 18:03:53 +01002144 return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
Pauli Nieminen4f646252010-04-01 12:45:01 +00002145}
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002146EXPORT_SYMBOL(set_pages_array_uc);
2147
Peter Zijlstra3c567352018-12-03 18:03:53 +01002148int set_pages_array_wc(struct page **pages, int numpages)
Pauli Nieminen4f646252010-04-01 12:45:01 +00002149{
Peter Zijlstra3c567352018-12-03 18:03:53 +01002150 return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
Pauli Nieminen4f646252010-04-01 12:45:01 +00002151}
2152EXPORT_SYMBOL(set_pages_array_wc);
2153
Peter Zijlstra3c567352018-12-03 18:03:53 +01002154int set_pages_array_wt(struct page **pages, int numpages)
Toshi Kani623dffb2015-06-04 18:55:20 +02002155{
Peter Zijlstra3c567352018-12-03 18:03:53 +01002156 return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT);
Toshi Kani623dffb2015-06-04 18:55:20 +02002157}
2158EXPORT_SYMBOL_GPL(set_pages_array_wt);
2159
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002160int set_pages_wb(struct page *page, int numpages)
2161{
2162 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002163
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002164 return set_memory_wb(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002165}
2166EXPORT_SYMBOL(set_pages_wb);
2167
Peter Zijlstra3c567352018-12-03 18:03:53 +01002168int set_pages_array_wb(struct page **pages, int numpages)
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002169{
2170 int retval;
2171 unsigned long start;
2172 unsigned long end;
2173 int i;
2174
Juergen Grossc06814d2014-11-03 14:01:57 +01002175 /* WB cache mode is hard wired to all cache attribute bits being 0 */
Peter Zijlstra3c567352018-12-03 18:03:53 +01002176 retval = cpa_clear_pages_array(pages, numpages,
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002177 __pgprot(_PAGE_CACHE_MASK));
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07002178 if (retval)
2179 return retval;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002180
Peter Zijlstra3c567352018-12-03 18:03:53 +01002181 for (i = 0; i < numpages; i++) {
Thomas Hellstrom8523acf2009-08-03 09:25:45 +02002182 if (PageHighMem(pages[i]))
2183 continue;
2184 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002185 end = start + PAGE_SIZE;
2186 free_memtype(start, end);
2187 }
2188
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07002189 return 0;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002190}
2191EXPORT_SYMBOL(set_pages_array_wb);
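/*
 * Usage sketch (editor's illustration): the *_array variants batch the
 * attribute change and the TLB/cache flush over a scatter list of pages,
 * which is how TTM-style graphics drivers avoid one flush per page:
 *
 *	struct page *pages[16];		// filled in by the caller
 *	int err = set_pages_array_wc(pages, 16);
 *
 *	if (err)
 *		return err;
 *	...
 *	set_pages_array_wb(pages, 16);	// restore before releasing them
 */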
2192
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002193int set_pages_x(struct page *page, int numpages)
2194{
2195 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002196
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002197 return set_memory_x(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002198}
2199EXPORT_SYMBOL(set_pages_x);
2200
2201int set_pages_nx(struct page *page, int numpages)
2202{
2203 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002204
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002205 return set_memory_nx(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002206}
2207EXPORT_SYMBOL(set_pages_nx);
2208
2209int set_pages_ro(struct page *page, int numpages)
2210{
2211 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002212
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002213 return set_memory_ro(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002214}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002215
2216int set_pages_rw(struct page *page, int numpages)
2217{
2218 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002219
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002220 return set_memory_rw(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002221}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002222
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002223static int __set_pages_p(struct page *page, int numpages)
2224{
Shaohua Lid75586a2008-08-21 10:46:06 +08002225 unsigned long tempaddr = (unsigned long) page_address(page);
2226 struct cpa_data cpa = { .vaddr = &tempaddr,
Borislav Petkov82f07122013-10-31 17:25:07 +01002227 .pgd = NULL,
Thomas Gleixner72e458d2008-02-04 16:48:07 +01002228 .numpages = numpages,
2229 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
Shaohua Lid75586a2008-08-21 10:46:06 +08002230 .mask_clr = __pgprot(0),
2231 .flags = 0};
Thomas Gleixner72932c72008-01-30 13:34:08 +01002232
Suresh Siddha55121b42008-09-23 14:00:40 -07002233 /*
2234 * No alias checking needed for setting the present flag; otherwise,
2235 * we may need to break large pages for 64-bit kernel text
2236 * mappings (this adds to complexity if we want to do this from
2237 * atomic context especially). Let's keep it simple!
2238 */
2239 return __change_page_attr_set_clr(&cpa, 0);
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002240}
2241
2242static int __set_pages_np(struct page *page, int numpages)
2243{
Shaohua Lid75586a2008-08-21 10:46:06 +08002244 unsigned long tempaddr = (unsigned long) page_address(page);
2245 struct cpa_data cpa = { .vaddr = &tempaddr,
Borislav Petkov82f07122013-10-31 17:25:07 +01002246 .pgd = NULL,
Thomas Gleixner72e458d2008-02-04 16:48:07 +01002247 .numpages = numpages,
2248 .mask_set = __pgprot(0),
Shaohua Lid75586a2008-08-21 10:46:06 +08002249 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2250 .flags = 0};
Thomas Gleixner72932c72008-01-30 13:34:08 +01002251
Suresh Siddha55121b42008-09-23 14:00:40 -07002252 /*
2253 * No alias checking needed for clearing the present flag; otherwise,
2254 * we may need to break large pages for 64-bit kernel text
2255 * mappings (this adds to complexity if we want to do this from
2256 * atomic context especially). Let's keep it simple!
2257 */
2258 return __change_page_attr_set_clr(&cpa, 0);
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002259}
2260
Rick Edgecombed253ca02019-04-25 17:11:34 -07002261int set_direct_map_invalid_noflush(struct page *page)
2262{
2263 return __set_pages_np(page, 1);
2264}
2265
2266int set_direct_map_default_noflush(struct page *page)
2267{
2268 return __set_pages_p(page, 1);
2269}
2270
Joonsoo Kim031bc572014-12-12 16:55:52 -08002271void __kernel_map_pages(struct page *page, int numpages, int enable)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272{
2273 if (PageHighMem(page))
2274 return;
Ingo Molnar9f4c8152008-01-30 13:33:41 +01002275 if (!enable) {
Ingo Molnarf9b84042006-06-27 02:54:49 -07002276 debug_check_no_locks_freed(page_address(page),
2277 numpages * PAGE_SIZE);
Ingo Molnar9f4c8152008-01-30 13:33:41 +01002278 }
Ingo Molnarde5097c2006-01-09 15:59:21 -08002279
Ingo Molnar9f4c8152008-01-30 13:33:41 +01002280 /*
Ingo Molnarf8d84062008-02-13 14:09:53 +01002281 * The return value is ignored as the calls cannot fail.
Suresh Siddha55121b42008-09-23 14:00:40 -07002282 * Large pages for identity mappings are not used at boot time
2283 * and hence no memory allocations happen during large page splits.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 */
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002285 if (enable)
2286 __set_pages_p(page, numpages);
2287 else
2288 __set_pages_np(page, numpages);
Ingo Molnar9f4c8152008-01-30 13:33:41 +01002289
2290 /*
Ingo Molnare4b71dc2008-01-30 13:34:04 +01002291 * We should perform an IPI and flush all TLBs,
Sebastian Andrzej Siewiorf77084d2018-10-17 12:34:32 +02002292 * but that can deadlock, so we flush only the current CPU.
2293 * Preemption needs to be disabled around __flush_tlb_all() due to
2294 * CR3 reload in __native_flush_tlb().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 */
Sebastian Andrzej Siewiorf77084d2018-10-17 12:34:32 +02002296 preempt_disable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 __flush_tlb_all();
Sebastian Andrzej Siewiorf77084d2018-10-17 12:34:32 +02002298 preempt_enable();
Boris Ostrovsky26564602013-04-11 13:59:52 -04002299
2300 arch_flush_lazy_mmu_mode();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301}
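/*
 * Editor's note: with CONFIG_DEBUG_PAGEALLOC enabled, the page allocator
 * calls __kernel_map_pages(page, n, 0) when pages are freed and
 * __kernel_map_pages(page, n, 1) when they are allocated again, so a
 * use-after-free through the direct map faults immediately instead of
 * silently corrupting memory.
 */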
Rafael J. Wysocki8a235ef2008-02-20 01:47:44 +01002302
2303#ifdef CONFIG_HIBERNATION
Rafael J. Wysocki8a235ef2008-02-20 01:47:44 +01002304bool kernel_page_present(struct page *page)
2305{
2306 unsigned int level;
2307 pte_t *pte;
2308
2309 if (PageHighMem(page))
2310 return false;
2311
2312 pte = lookup_address((unsigned long)page_address(page), &level);
2313 return (pte_val(*pte) & _PAGE_PRESENT);
2314}
Rafael J. Wysocki8a235ef2008-02-20 01:47:44 +01002315#endif /* CONFIG_HIBERNATION */
2316
Sai Praneeth Prakhya7e0dabd2018-11-29 18:12:23 +01002317int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
2318 unsigned numpages, unsigned long page_flags)
Borislav Petkov82f07122013-10-31 17:25:07 +01002319{
2320 int retval = -EINVAL;
2321
2322 struct cpa_data cpa = {
2323 .vaddr = &address,
2324 .pfn = pfn,
2325 .pgd = pgd,
2326 .numpages = numpages,
2327 .mask_set = __pgprot(0),
2328 .mask_clr = __pgprot(0),
2329 .flags = 0,
2330 };
2331
Sai Praneeth Prakhya7e0dabd2018-11-29 18:12:23 +01002332 WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2333
Borislav Petkov82f07122013-10-31 17:25:07 +01002334 if (!(__supported_pte_mask & _PAGE_NX))
2335 goto out;
2336
2337 if (!(page_flags & _PAGE_NX))
2338 cpa.mask_clr = __pgprot(_PAGE_NX);
2339
Sai Praneeth15f003d2016-02-17 12:36:04 +00002340 if (!(page_flags & _PAGE_RW))
2341 pgprot_val(cpa.mask_clr) |= _PAGE_RW;
2342
Tom Lendacky21729f82017-07-17 16:10:07 -05002343 if (!(page_flags & _PAGE_ENC))
2344 cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
2345
Borislav Petkov82f07122013-10-31 17:25:07 +01002346 cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
2347
2348 retval = __change_page_attr_set_clr(&cpa, 0);
2349 __flush_tlb_all();
2350
2351out:
2352 return retval;
2353}
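/*
 * Usage sketch (editor's illustration): the EFI code builds its private
 * page table with this helper before SMP bringup, roughly:
 *
 *	u64 pfn = md->phys_addr >> PAGE_SHIFT;	// 'md': an efi_memory_desc_t
 *
 *	if (kernel_map_pages_in_pgd(efi_pgd, pfn, md->virt_addr,
 *				    md->num_pages, _PAGE_RW))
 *		return -ENOMEM;
 *
 * 'efi_pgd' is the caller-allocated PGD; the names here are illustrative.
 */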
2354
Arjan van de Vend1028a12008-01-30 13:34:07 +01002355/*
Sai Praneeth Prakhya7e0dabd2018-11-29 18:12:23 +01002356 * __flush_tlb_all() flushes mappings only on the current CPU, and hence this
2357 * function shouldn't be used in an SMP environment. Presently, it's used only
2358 * during boot (way before smp_init()) by the EFI subsystem and hence is OK.
2359 */
2360int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
2361 unsigned long numpages)
2362{
2363 int retval;
2364
2365 /*
2366 * The typical sequence for unmapping is to find a pte through
2367 * lookup_address_in_pgd() (ideally, it should never return NULL because
2368 * the address is already mapped) and change its protections. As pfn is
2369 * the *target* of a mapping, it's not useful while unmapping.
2370 */
2371 struct cpa_data cpa = {
2372 .vaddr = &address,
2373 .pfn = 0,
2374 .pgd = pgd,
2375 .numpages = numpages,
2376 .mask_set = __pgprot(0),
2377 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2378 .flags = 0,
2379 };
2380
2381 WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2382
2383 retval = __change_page_attr_set_clr(&cpa, 0);
2384 __flush_tlb_all();
2385
2386 return retval;
2387}
2388
2389/*
Arjan van de Vend1028a12008-01-30 13:34:07 +01002390 * The testcases use internal knowledge of the implementation that shouldn't
2391 * be exposed to the rest of the kernel. Include these directly here.
2392 */
2393#ifdef CONFIG_CPA_DEBUG
2394#include "pageattr-test.c"
2395#endif