/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>
#include <asm/set_memory.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split		: 1,
			force_static_prot	: 1;
	int		curpage;
	struct page	**pages;
};

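/*
 * Illustrative sketch (not part of the original file): a typical
 * attribute change, such as clearing _PAGE_RW on a 16-page range of
 * the kernel mapping, would be described by a cpa_data roughly like
 * this before being handed to the CPA machinery. The field names are
 * real; the concrete values are assumptions for illustration only:
 *
 *	unsigned long addr = some_kernel_vaddr;	// hypothetical
 *	struct cpa_data cpa = {
 *		.vaddr		= &addr,
 *		.pgd		= NULL,		// NULL: operate on init_mm
 *		.numpages	= 16,
 *		.mask_set	= __pgprot(0),
 *		.mask_clr	= __pgprot(_PAGE_RW),
 *		.flags		= 0,
 *		.curpage	= 0,
 *	};
 */
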
enum cpa_warn {
	CPA_CONFLICT,
	CPA_PROTECT,
	CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity
 * mappings) using cpa_lock, so that we don't allow any other CPU with
 * stale large TLB entries to change the page attributes in parallel
 * while another CPU is splitting a large page entry along with changing
 * the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
	cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
	cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
	cpa_4k_install++;
}

static inline void cpa_inc_lp_sameprot(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_sameprot++;
	else
		cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_preserved++;
	else
		cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
	seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
	seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
	seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
	seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
	seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
	seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
	return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
	.open		= cpastats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init cpa_stats_init(void)
{
	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
			    &cpastats_fops);
	return 0;
}
late_initcall(cpa_stats_init);
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif


static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}

#endif

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	mb();

	for (; p < vend; p += clflush_size)
		clflushopt(p);

	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
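
/*
 * Usage sketch (illustrative, not from the original file): flushing a
 * buffer after CPU writes so that a non-coherent observer sees the
 * data. 'buf', 'data' and 'len' are hypothetical:
 *
 *	memcpy(buf, data, len);
 *	clflush_cache_range(buf, len);
 *
 * The mb() fences inside the function order the clflushopt operations
 * against surrounding loads and stores, so the caller does not need an
 * extra barrier here.
 */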

void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}
287
Peter Zijlstra47e262a2018-09-19 10:50:23 +0200288static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
289{
290 BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
291
292 WARN_ON(PAGE_ALIGN(start) != start);
293
Peter Zijlstra7904ba82018-09-19 10:50:24 +0200294 if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
Peter Zijlstra47e262a2018-09-19 10:50:23 +0200295 cpa_flush_all(cache);
296 return true;
297 }
298
299 flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
300
301 return !cache;
302}
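
/*
 * Illustrative summary (not part of the original file) of the decision
 * made above:
 *
 *	cache && !CLFLUSH  -> cpa_flush_all() (wbinvd), caller done (true)
 *	!cache             -> TLB range flush only, caller done (true)
 *	cache && CLFLUSH   -> TLB range flush here; caller must still
 *	                      clflush the affected lines (false)
 */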

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	if (__cpa_flush_range(start, numpages, cache))
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

static void cpa_flush_array(unsigned long baddr, unsigned long *start,
			    int numpages, int cache,
			    int in_flags, struct page **pages)
{
	unsigned int i, level;

	if (__cpa_flush_range(baddr, numpages, cache))
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0; i < numpages; i++) {
		unsigned long addr;
		pte_t *pte;

		if (in_flags & CPA_PAGES_ARRAY)
			addr = (unsigned long)page_address(pages[i]);
		else
			addr = start[i];

		pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *)addr, PAGE_SIZE);
	}
}

static bool overlaps(unsigned long r1_start, unsigned long r1_end,
		     unsigned long r2_start, unsigned long r2_end)
{
	return (r1_start <= r2_end && r1_end >= r2_start) ||
		(r2_start <= r1_end && r2_end >= r1_start);
}
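
/*
 * Example (illustrative): both bounds are treated as inclusive, so
 * r1 = [10, 20] and r2 = [20, 30] share exactly one unit and
 * overlaps() returns true, while disjoint ranges such as [10, 19]
 * and [20, 30] return false.
 */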

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
		return _PAGE_NX;
	return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases. This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

	/*
	 * Note: __end_rodata is page aligned and not inclusive, so
	 * subtract 1 to get the last enforced PFN in the rodata area.
	 */
	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
		return _PAGE_RW;
	return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
 * out of which the kernel actually executes. Do not protect the low
 * mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
	unsigned long t_end = (unsigned long)_etext - 1;
	unsigned long t_start = (unsigned long)_text;

	if (overlaps(start, end, t_start, t_end))
		return _PAGE_NX;
	return 0;
}

#if defined(CONFIG_X86_64)
/*
 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
 * the kernel text mappings for the large page aligned text and rodata
 * sections will always be read-only. The kernel identity mappings
 * covering the holes caused by this alignment can be anything the user
 * asks for.
 *
 * This will preserve the large page mappings for kernel text/data at no
 * extra cost.
 */
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
	unsigned long t_start = (unsigned long)_text;
	unsigned int level;

	if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
		return 0;
	/*
	 * Don't enforce the !RW mapping for the kernel text mapping, if
	 * the current mapping is already using small page mapping. No
	 * need to work hard to preserve large page mappings in this case.
	 *
	 * This also fixes the Linux Xen paravirt guest boot failure caused
	 * by unexpected read-only mappings for kernel identity
	 * mappings. In this paravirt guest case, the kernel text mapping
	 * and the kernel identity mapping share the same page-table pages,
	 * so the protections for kernel text and identity mappings have to
	 * be the same.
	 */
	if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
		return _PAGE_RW;
	return 0;
}
#else
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	return 0;
}
#endif

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
				  unsigned long start, unsigned long end,
				  unsigned long pfn, const char *txt)
{
	static const char *lvltxt[] = {
		[CPA_CONFLICT]	= "conflict",
		[CPA_PROTECT]	= "protect",
		[CPA_DETECT]	= "detect",
	};

	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
		return;

	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
		(unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
					  unsigned long pfn, unsigned long npg,
					  int warnlvl)
{
	pgprotval_t forbidden, res;
	unsigned long end;

	/*
	 * There is no point in checking RW/NX conflicts when the requested
	 * mapping is setting the page !PRESENT.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		return prot;

	/* Operate on the virtual address */
	end = start + npg * PAGE_SIZE - 1;

	res = protect_kernel_text(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
	forbidden = res;

	res = protect_kernel_text_ro(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
	forbidden |= res;

	/* Check the PFN directly */
	res = protect_pci_bios(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
	forbidden |= res;

	res = protect_rodata(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
	forbidden |= res;

	return __pgprot(pgprot_val(prot) & ~forbidden);
}
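
/*
 * Illustrative example (not part of the original file): a request to
 * make a range of kernel text read-write would be narrowed here. With
 * prot containing _PAGE_RW over a range inside [_text, _etext),
 * protect_kernel_text_ro() reports _PAGE_RW as forbidden once
 * kernel_set_to_readonly is set, so the returned pgprot has RW
 * stripped and a "Text RO" conflict is logged when warnlvl is at or
 * below cpa_warn_level.
 */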

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);
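
/*
 * Usage sketch (illustrative, not from the original file): checking
 * whether an address is currently backed by a large page:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);	// addr hypothetical
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		;	// addr lies in a 2M mapping; *pte is really a pmd
 *
 * Per the note above, callers must be prepared for a non-4K level,
 * where the returned pointer actually refers to a pmd/pud entry.
 */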

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level)
{
	if (cpa->pgd)
		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
					     address, level);

	return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-PAE kernel work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
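
/*
 * Usage sketch (illustrative, not from the original file): unlike
 * __pa(), this walks the page tables, so it also works for addresses
 * outside the direct mapping, e.g. in the vmalloc area:
 *
 *	void *p = vmalloc(PAGE_SIZE);		// hypothetical buffer
 *	phys_addr_t pa = slow_virt_to_phys(p);
 *
 * __pa(p) would be wrong here because vmalloc addresses are not part
 * of the kernel identity mapping.
 */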

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}

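/*
 * Example (illustrative): on x86 the _PAGE_PROTNONE and _PAGE_GLOBAL
 * flags share the same bit and are distinguished only by
 * _PAGE_PRESENT. So for a non-present pgprot:
 *
 *	pgprot_t prot = __pgprot(_PAGE_GLOBAL);	// no _PAGE_PRESENT
 *	prot = pgprot_clear_protnone_bits(prot);
 *	// pgprot_val(prot) no longer contains _PAGE_GLOBAL, so the
 *	// resulting entry cannot be misread as PROT_NONE.
 */
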
static int __should_split_large_page(pte_t *kpte, unsigned long address,
				     struct cpa_data *cpa)
{
	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
	pgprot_t old_prot, new_prot, req_prot, chk_prot;
	pte_t new_pte, old_pte, *tmp;
	enum pg_level level;

	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte)
		return 1;

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		cpa_inc_2m_checked();
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		cpa_inc_1g_checked();
		break;
	default:
		return -EINVAL;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);

	/*
	 * Calculate the number of pages which fit into this large
	 * page starting at address:
	 */
	lpaddr = (address + psize) & pmask;
	numpages = (lpaddr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
	 */
	old_pte = *kpte;
	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
	req_prot = pgprot_large_2_4k(old_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);
	req_prot = pgprot_clear_protnone_bits(req_prot);
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
		pgprot_val(req_prot) |= _PAGE_PSE;

	/*
	 * old_pfn points to the large page base pfn. So we need to add the
	 * offset of the virtual address:
	 */
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	/*
	 * Calculate the large page base address and the number of 4K pages
	 * in the large page
	 */
	lpaddr = address & pmask;
	numpages = psize >> PAGE_SHIFT;

	/*
	 * Sanity check that the existing mapping is correct versus the static
	 * protections. static_protections() guards against !PRESENT, so no
	 * extra conditional required here.
	 */
	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
				      CPA_CONFLICT);

	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
		/*
		 * Split the large page and tell the split code to
		 * enforce static protections.
		 */
		cpa->force_static_prot = 1;
		return 1;
	}

	/*
	 * Optimization: If the requested pgprot is the same as the current
	 * pgprot, then the large page can be preserved and no updates are
	 * required independent of alignment and length of the requested
	 * range. The above already established that the current pgprot is
	 * correct, which in consequence makes the requested pgprot correct
	 * as well if it is the same. The static protection scan below will
	 * not come to a different conclusion.
	 */
	if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
		cpa_inc_lp_sameprot(level);
		return 0;
	}

	/*
	 * If the requested range does not cover the full page, split it up
	 */
	if (address != lpaddr || cpa->numpages != numpages)
		return 1;

	/*
	 * Check whether the requested pgprot is conflicting with a static
	 * protection requirement in the large page.
	 */
	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
				      CPA_DETECT);

	/*
	 * If there is a conflict, split the large page.
	 *
	 * There used to be a 4k wise evaluation trying really hard to
	 * preserve the large pages, but experimentation has shown that this
	 * does not help at all. There might be corner cases which would
	 * preserve one large page occasionally, but it's really not worth the
	 * extra code and cycles for the common case.
	 */
	if (pgprot_val(req_prot) != pgprot_val(new_prot))
		return 1;

	/* All checks passed. Update the large page mapping. */
	new_pte = pfn_pte(old_pfn, new_prot);
	__set_pmd_pte(kpte, address, new_pte);
	cpa->flags |= CPA_FLUSHTLB;
	cpa_inc_lp_preserved(level);
	return 0;
}

static int should_split_large_page(pte_t *kpte, unsigned long address,
				   struct cpa_data *cpa)
{
	int do_split;

	if (cpa->force_split)
		return 1;

	spin_lock(&pgd_lock);
	do_split = __should_split_large_page(kpte, address, cpa);
	spin_unlock(&pgd_lock);

	return do_split;
}

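/*
 * Illustrative summary (not part of the original file) of the return
 * values the caller acts upon:
 *
 *	 1 - split the large page (forced split, race lost, partial
 *	     range, or a static-protection conflict was detected)
 *	 0 - large page was preserved (possibly rewritten in place)
 *	<0 - error, e.g. -EINVAL for an unexpected page table level
 */
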
static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
			  pgprot_t ref_prot, unsigned long address,
			  unsigned long size)
{
	unsigned int npg = PFN_DOWN(size);
	pgprot_t prot;

	/*
	 * If should_split_large_page() discovered an inconsistent mapping,
	 * remove the invalid protection in the split mapping.
	 */
	if (!cpa->force_static_prot)
		goto set;

	prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);

	if (pgprot_val(prot) == pgprot_val(ref_prot))
		goto set;

	/*
	 * If this is splitting a PMD, fix it up. PUD splits cannot be
	 * fixed trivially as that would require to rescan the newly
	 * installed PMD mappings after returning from split_large_page()
	 * so an eventual further split can allocate the necessary PTE
	 * pages. Warn for now and revisit it in case this actually
	 * happens.
	 */
	if (size == PAGE_SIZE)
		ref_prot = prot;
	else
		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
set:
	set_pte(pte, pfn_pte(pfn, ref_prot));
}

static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
{
	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
	pte_t *pbase = (pte_t *)page_address(base);
	unsigned int i, level;
	pgprot_t ref_prot;
	pte_t *tmp;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;
	}

	paravirt_alloc_pte(&init_mm, page_to_pfn(base));

	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
		/*
		 * Clear PSE (aka _PAGE_PAT) and move
		 * PAT bit to correct position.
		 */
		ref_prot = pgprot_large_2_4k(ref_prot);
		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
		lpaddr = address & PMD_MASK;
		lpinc = PAGE_SIZE;
		break;

	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		lpaddr = address & PUD_MASK;
		lpinc = PMD_SIZE;
		/*
		 * Clear the PSE flags if the PRESENT flag is not set
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
	}

	ref_prot = pgprot_clear_protnone_bits(ref_prot);

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = ref_pfn;
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);

	if (virt_addr_valid(address)) {
		unsigned long pfn = PFN_DOWN(__pa(address));

		if (pfn_range_is_mapped(pfn, pfn + 1))
			split_page_count(level);
	}

	/*
	 * Install the new, split up pagetable.
	 *
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Do a global flush tlb after splitting the large page
	 * and before we do the actual change page attribute in the PTE.
	 *
	 * Without this, we violate the TLB application note, which says:
	 * "The TLBs may contain both ordinary and large-page
	 *  translations for a 4-KByte range of linear addresses. This
	 *  may occur if software modifies the paging structures so that
	 *  the page size used for the address range changes. If the two
	 *  translations differ with respect to page frame or attributes
	 *  (e.g., permissions), processor behavior is undefined and may
	 *  be implementation-specific."
	 *
	 * We do this global tlb flush inside the cpa_lock, so that we
	 * don't allow any other CPU with stale TLB entries to change, in
	 * parallel, the attributes of a page that falls into the just
	 * split large page entry.
	 */
	flush_tlb_all();
	spin_unlock(&pgd_lock);

	return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
			    unsigned long address)
{
	struct page *base;

	if (!debug_pagealloc_enabled())
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc_enabled())
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	if (__split_large_page(cpa, kpte, address, base))
		__free_page(base);

	return 0;
}

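/*
 * Note (added for clarity, not part of the original file): the
 * transient unlock above exists because alloc_pages(GFP_KERNEL, 0)
 * may sleep, which is not allowed while holding the cpa_lock
 * spinlock. With DEBUG_PAGEALLOC the lock is not taken at all (see
 * the cpa_lock comment near the top), hence the
 * debug_pagealloc_enabled() checks around both operations.
 */
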
static bool try_to_free_pte_page(pte_t *pte)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (!pte_none(pte[i]))
			return false;

	free_page((unsigned long)pte);
	return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_none(pmd[i]))
			return false;

	free_page((unsigned long)pmd);
	return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (start < end) {
		set_pte(pte, __pte(0));

		start += PAGE_SIZE;
		pte++;
	}

	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
		pmd_clear(pmd);
		return true;
	}
	return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (unmap_pte_range(pmd, start, end))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_large(*pmd))
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if we haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
	pud_t *pud = pud_offset(p4d, start);

	/*
	 * Not on a GB page boundary?
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		unmap_pmd_range(pud, start, pre_end);

		start = pre_end;
		pud++;
	}

	/*
	 * Try to unmap in 1G chunks?
	 */
	while (end - start >= PUD_SIZE) {

		if (pud_large(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);

		start += PUD_SIZE;
		pud++;
	}

	/*
	 * 2M leftovers?
	 */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * No need to try to free the PUD page because we'll free it in
	 * populate_pgd's error path
	 */
}

static int alloc_pte_page(pmd_t *pmd)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	if (!pte)
		return -1;

	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
	if (!pmd)
		return -1;

	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	return 0;
}

static void populate_pte(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, start);

	pgprot = pgprot_clear_protnone_bits(pgprot);

	while (num_pages-- && start < end) {
		set_pte(pte, pfn_pte(cpa->pfn, pgprot));

		start += PAGE_SIZE;
		cpa->pfn++;
		pte++;
	}
}

static long populate_pmd(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
	long cur_pages = 0;
	pmd_t *pmd;
	pgprot_t pmd_pgprot;

	/*
	 * Not on a 2M boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

		pre_end   = min_t(unsigned long, pre_end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(unsigned int, num_pages, cur_pages);

		/*
		 * Need a PTE page?
		 */
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

		start = pre_end;
	}

	/*
	 * We mapped them all?
	 */
	if (num_pages == cur_pages)
		return cur_pages;

	pmd_pgprot = pgprot_4k_2_large(pgprot);

	while (end - start >= PMD_SIZE) {

		/*
		 * We cannot use a 1G page so allocate a PMD page if needed.
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		pmd = pmd_offset(pud, start);

		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
						canon_pgprot(pmd_pgprot))));

		start	  += PMD_SIZE;
		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
		cur_pages += PMD_SIZE >> PAGE_SHIFT;
	}

	/*
	 * Map trailing 4K pages.
	 */
	if (start < end) {
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, end, num_pages - cur_pages,
			     pmd, pgprot);
	}
	return num_pages;
}

static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
			pgprot_t pgprot)
{
	pud_t *pud;
	unsigned long end;
	long cur_pages = 0;
	pgprot_t pud_pgprot;

	end = start + (cpa->numpages << PAGE_SHIFT);

	/*
	 * Not on a Gb page boundary? => map everything up to it with
	 * smaller pages.
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long pre_end;
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

		pre_end   = min_t(unsigned long, end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);

		pud = pud_offset(p4d, start);

		/*
		 * Need a PMD page?
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
					 pud, pgprot);
		if (cur_pages < 0)
			return cur_pages;

		start = pre_end;
	}

	/* We mapped them all? */
	if (cpa->numpages == cur_pages)
		return cur_pages;

	pud = pud_offset(p4d, start);
	pud_pgprot = pgprot_4k_2_large(pgprot);

	/*
	 * Map everything starting from the Gb boundary, possibly with 1G pages
	 */
	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
						canon_pgprot(pud_pgprot))));

		start	  += PUD_SIZE;
		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
		cur_pages += PUD_SIZE >> PAGE_SHIFT;
		pud++;
	}

	/* Map trailing leftover */
	if (start < end) {
		long tmp;

		pud = pud_offset(p4d, start);
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
				   pud, pgprot);
		if (tmp < 0)
			return cur_pages;

		cur_pages += tmp;
	}
	return cur_pages;
}

1366/*
1367 * Restrictions for the kernel page table do not necessarily apply when mapping
1368 * an alternate PGD.
1369 */
1370static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
1371{
1372 pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
Borislav Petkovf3f72962013-10-31 17:25:01 +01001373 pud_t *pud = NULL; /* shut up gcc */
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001374 p4d_t *p4d;
Borislav Petkov42a54772014-01-18 12:48:16 +01001375 pgd_t *pgd_entry;
Matt Fleminge535ec02016-09-20 14:26:21 +01001376 long ret;
Borislav Petkovf3f72962013-10-31 17:25:01 +01001377
1378 pgd_entry = cpa->pgd + pgd_index(addr);
1379
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001380 if (pgd_none(*pgd_entry)) {
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -08001381 p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001382 if (!p4d)
1383 return -1;
1384
1385 set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
1386 }
1387
Borislav Petkovf3f72962013-10-31 17:25:01 +01001388 /*
1389 * Allocate a PUD page and hand it down for mapping.
1390 */
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001391 p4d = p4d_offset(pgd_entry, addr);
1392 if (p4d_none(*p4d)) {
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -08001393 pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
Borislav Petkovf3f72962013-10-31 17:25:01 +01001394 if (!pud)
1395 return -1;
Andy Lutomirski530dd8d2016-07-22 21:58:08 -07001396
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001397 set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
Borislav Petkovf3f72962013-10-31 17:25:01 +01001398 }
1399
1400 pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
1401 pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
1402
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001403 ret = populate_pud(cpa, addr, p4d, pgprot);
Borislav Petkov0bb8aee2013-10-31 17:25:05 +01001404 if (ret < 0) {
Andy Lutomirski55920d32016-07-23 09:59:28 -07001405 /*
1406 * Leave the PUD page in place in case some other CPU or thread
1407 * already found it, but remove any useless entries we just
1408 * added to it.
1409 */
Kirill A. Shutemov45478332017-03-17 21:55:12 +03001410 unmap_pud_range(p4d, addr,
Borislav Petkov0bb8aee2013-10-31 17:25:05 +01001411 addr + (cpa->numpages << PAGE_SHIFT));
Borislav Petkov0bb8aee2013-10-31 17:25:05 +01001412 return ret;
1413 }
Borislav Petkov42a54772014-01-18 12:48:16 +01001414
Borislav Petkovf3f72962013-10-31 17:25:01 +01001415 cpa->numpages = ret;
1416 return 0;
1417}
1418
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001419static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1420 int primary)
1421{
Matt Fleming7fc84422016-04-25 21:06:35 +01001422 if (cpa->pgd) {
1423 /*
1424 * Right now, we only execute this code path when mapping
1425	 * the EFI virtual memory map regions; no other users
1426 * provide a ->pgd value. This may change in the future.
1427 */
Borislav Petkov82f07122013-10-31 17:25:07 +01001428 return populate_pgd(cpa, vaddr);
Matt Fleming7fc84422016-04-25 21:06:35 +01001429 }
Borislav Petkov82f07122013-10-31 17:25:07 +01001430
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001431 /*
1432	 * Ignore all non-primary paths.
1433 */
Jan Beulich405e11332016-02-10 02:03:00 -07001434 if (!primary) {
1435 cpa->numpages = 1;
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001436 return 0;
Jan Beulich405e11332016-02-10 02:03:00 -07001437 }
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001438
1439 /*
1440 * Ignore the NULL PTE for kernel identity mapping, as it is expected
1441 * to have holes.
1442	 * Also set numpages to '1', indicating that we processed the CPA request
1443	 * for one virtual address page and its pfn. TBD: numpages can be set based
1444 * on the initial value and the level returned by lookup_address().
1445 */
1446 if (within(vaddr, PAGE_OFFSET,
1447 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
1448 cpa->numpages = 1;
1449 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1450 return 0;
Dave Hansen58e65b52018-04-20 15:20:21 -07001451
1452 } else if (__cpa_pfn_in_highmap(cpa->pfn)) {
1453 /* Faults in the highmap are OK, so do not warn: */
1454 return -EFAULT;
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001455 } else {
1456		WARN(1, "CPA: called for zero pte. "
1457 "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
1458 *cpa->vaddr);
1459
1460 return -EFAULT;
1461 }
1462}
1463
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001464static int __change_page_attr(struct cpa_data *cpa, int primary)
Ingo Molnar9f4c8152008-01-30 13:33:41 +01001465{
Shaohua Lid75586a2008-08-21 10:46:06 +08001466 unsigned long address;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +01001467 int do_split, err;
1468 unsigned int level;
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001469 pte_t *kpte, old_pte;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470
Thomas Hellstrom8523acf2009-08-03 09:25:45 +02001471 if (cpa->flags & CPA_PAGES_ARRAY) {
1472 struct page *page = cpa->pages[cpa->curpage];
1473 if (unlikely(PageHighMem(page)))
1474 return 0;
1475 address = (unsigned long)page_address(page);
1476 } else if (cpa->flags & CPA_ARRAY)
Shaohua Lid75586a2008-08-21 10:46:06 +08001477 address = cpa->vaddr[cpa->curpage];
1478 else
1479 address = *cpa->vaddr;
Ingo Molnar97f99fe2008-01-30 13:33:55 +01001480repeat:
Borislav Petkov82f07122013-10-31 17:25:07 +01001481 kpte = _lookup_address_cpa(cpa, address, &level);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 if (!kpte)
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001483 return __cpa_process_fault(cpa, address, primary);
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001484
1485 old_pte = *kpte;
Dave Hansendcb32d92016-07-07 17:19:15 -07001486 if (pte_none(old_pte))
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001487 return __cpa_process_fault(cpa, address, primary);
Ingo Molnar9f4c8152008-01-30 13:33:41 +01001488
Thomas Gleixner30551bb2008-01-30 13:34:04 +01001489 if (level == PG_LEVEL_4K) {
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001490 pte_t new_pte;
Arjan van de Ven626c2c92008-02-04 16:48:05 +01001491 pgprot_t new_prot = pte_pgprot(old_pte);
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001492 unsigned long pfn = pte_pfn(old_pte);
Thomas Gleixnera72a08a2008-01-30 13:34:07 +01001493
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001494 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
1495 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
Ingo Molnar86f03982008-01-30 13:34:09 +01001496
Thomas Gleixner5c280cf2018-09-17 16:29:12 +02001497 cpa_inc_4k_install();
Thomas Gleixner40464602018-09-17 16:29:11 +02001498 new_prot = static_protections(new_prot, address, pfn, 1,
1499 CPA_PROTECT);
Ingo Molnar86f03982008-01-30 13:34:09 +01001500
Dave Hansend1440b22018-04-06 13:55:02 -07001501 new_prot = pgprot_clear_protnone_bits(new_prot);
Andrea Arcangelia8aed3e2013-02-22 15:11:51 -08001502
1503 /*
Arjan van de Ven626c2c92008-02-04 16:48:05 +01001504 * We need to keep the pfn from the existing PTE,
1505		 * after all we're only going to change its attributes,
1506		 * not the memory it points to.
1507 */
Dave Hansen1a544202018-04-06 13:55:11 -07001508 new_pte = pfn_pte(pfn, new_prot);
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001509 cpa->pfn = pfn;
Thomas Gleixnerf4ae5da2008-02-04 16:48:07 +01001510 /*
1511 * Do we really change anything ?
1512 */
1513 if (pte_val(old_pte) != pte_val(new_pte)) {
1514 set_pte_atomic(kpte, new_pte);
Shaohua Lid75586a2008-08-21 10:46:06 +08001515 cpa->flags |= CPA_FLUSHTLB;
Thomas Gleixnerf4ae5da2008-02-04 16:48:07 +01001516 }
Rafael J. Wysocki9b5cf482008-03-03 01:17:37 +01001517 cpa->numpages = 1;
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001518 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 }
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001520
1521 /*
1522	 * Check whether we can keep the large page intact
1523 * and just change the pte:
1524 */
Thomas Gleixner8679de02018-09-17 16:29:08 +02001525 do_split = should_split_large_page(kpte, address, cpa);
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001526 /*
1527 * When the range fits into the existing large page,
Rafael J. Wysocki9b5cf482008-03-03 01:17:37 +01001528	 * return. cpa->numpages and the CPA_FLUSHTLB flag have been
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001529	 * updated in should_split_large_page():
1530 */
Ingo Molnar87f7f8f2008-02-04 16:48:10 +01001531 if (do_split <= 0)
1532 return do_split;
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001533
1534 /*
1535 * We have to split the large page:
1536 */
Borislav Petkov82f07122013-10-31 17:25:07 +01001537 err = split_large_page(cpa, kpte, address);
Peter Zijlstrac0a759a2018-09-19 10:50:18 +02001538 if (!err)
Ingo Molnar87f7f8f2008-02-04 16:48:10 +01001539 goto repeat;
Ingo Molnarbeaff632008-02-04 16:48:09 +01001540
Ingo Molnar87f7f8f2008-02-04 16:48:10 +01001541 return err;
Ingo Molnar9f4c8152008-01-30 13:33:41 +01001542}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001544static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
1545
1546static int cpa_process_alias(struct cpa_data *cpa)
Ingo Molnar44af6c42008-01-30 13:34:03 +01001547{
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001548 struct cpa_data alias_cpa;
Tejun Heo992f4c12009-06-22 11:56:24 +09001549 unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
Tejun Heoe933a732009-08-14 15:00:53 +09001550 unsigned long vaddr;
Tejun Heo992f4c12009-06-22 11:56:24 +09001551 int ret;
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001552
Yinghai Lu8eb57792012-11-16 19:38:49 -08001553 if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001554 return 0;
1555
Thomas Gleixnerf34b4392008-02-15 22:17:57 +01001556 /*
1557 * No need to redo, when the primary call touched the direct
1558 * mapping already:
1559 */
Thomas Hellstrom8523acf2009-08-03 09:25:45 +02001560 if (cpa->flags & CPA_PAGES_ARRAY) {
1561 struct page *page = cpa->pages[cpa->curpage];
1562 if (unlikely(PageHighMem(page)))
1563 return 0;
1564 vaddr = (unsigned long)page_address(page);
1565 } else if (cpa->flags & CPA_ARRAY)
Shaohua Lid75586a2008-08-21 10:46:06 +08001566 vaddr = cpa->vaddr[cpa->curpage];
1567 else
1568 vaddr = *cpa->vaddr;
1569
1570 if (!(within(vaddr, PAGE_OFFSET,
Suresh Siddhaa1e46212009-01-20 14:20:21 -08001571 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001572
Thomas Gleixnerf34b4392008-02-15 22:17:57 +01001573 alias_cpa = *cpa;
Tejun Heo992f4c12009-06-22 11:56:24 +09001574 alias_cpa.vaddr = &laddr;
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001575 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
Shaohua Lid75586a2008-08-21 10:46:06 +08001576
Thomas Gleixnerf34b4392008-02-15 22:17:57 +01001577 ret = __change_page_attr_set_clr(&alias_cpa, 0);
Tejun Heo992f4c12009-06-22 11:56:24 +09001578 if (ret)
1579 return ret;
Thomas Gleixnerf34b4392008-02-15 22:17:57 +01001580 }
Ingo Molnar44af6c42008-01-30 13:34:03 +01001581
Arjan van de Ven488fd992008-01-30 13:34:07 +01001582#ifdef CONFIG_X86_64
Thomas Gleixner08797502008-01-30 13:34:09 +01001583 /*
Tejun Heo992f4c12009-06-22 11:56:24 +09001584 * If the primary call didn't touch the high mapping already
1585 * and the physical address is inside the kernel map, we need
Thomas Gleixner08797502008-01-30 13:34:09 +01001586	 * to touch the high-mapped kernel as well:
1587 */
Tejun Heo992f4c12009-06-22 11:56:24 +09001588 if (!within(vaddr, (unsigned long)_text, _brk_end) &&
Dave Hansen58e65b52018-04-20 15:20:21 -07001589 __cpa_pfn_in_highmap(cpa->pfn)) {
Tejun Heo992f4c12009-06-22 11:56:24 +09001590 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1591 __START_KERNEL_map - phys_base;
1592 alias_cpa = *cpa;
1593 alias_cpa.vaddr = &temp_cpa_vaddr;
1594 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
Thomas Gleixner08797502008-01-30 13:34:09 +01001595
Tejun Heo992f4c12009-06-22 11:56:24 +09001596 /*
1597 * The high mapping range is imprecise, so ignore the
1598 * return value.
1599 */
1600 __change_page_attr_set_clr(&alias_cpa, 0);
1601 }
Thomas Gleixner08797502008-01-30 13:34:09 +01001602#endif
Tejun Heo992f4c12009-06-22 11:56:24 +09001603
1604 return 0;
Ingo Molnar44af6c42008-01-30 13:34:03 +01001605}
1606
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001607static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
Thomas Gleixnerff314522008-01-30 13:34:08 +01001608{
Matt Fleminge535ec02016-09-20 14:26:21 +01001609 unsigned long numpages = cpa->numpages;
1610 int ret;
Thomas Gleixnerff314522008-01-30 13:34:08 +01001611
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001612 while (numpages) {
1613 /*
1614 * Store the remaining nr of pages for the large page
1615 * preservation check.
1616 */
Rafael J. Wysocki9b5cf482008-03-03 01:17:37 +01001617 cpa->numpages = numpages;
Shaohua Lid75586a2008-08-21 10:46:06 +08001618		/* For array changes, we can't use large pages */
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001619 if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
Shaohua Lid75586a2008-08-21 10:46:06 +08001620 cpa->numpages = 1;
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001621
Christian Borntraeger288cf3c2016-03-15 14:57:33 -07001622 if (!debug_pagealloc_enabled())
Suresh Siddhaad5ca552008-09-23 14:00:42 -07001623 spin_lock(&cpa_lock);
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001624 ret = __change_page_attr(cpa, checkalias);
Christian Borntraeger288cf3c2016-03-15 14:57:33 -07001625 if (!debug_pagealloc_enabled())
Suresh Siddhaad5ca552008-09-23 14:00:42 -07001626 spin_unlock(&cpa_lock);
Thomas Gleixnerff314522008-01-30 13:34:08 +01001627 if (ret)
1628 return ret;
Thomas Gleixnerff314522008-01-30 13:34:08 +01001629
Thomas Gleixnerc31c7d42008-02-18 20:54:14 +01001630 if (checkalias) {
1631 ret = cpa_process_alias(cpa);
1632 if (ret)
1633 return ret;
1634 }
1635
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001636 /*
1637 * Adjust the number of pages with the result of the
1638 * CPA operation. Either a large page has been
1639 * preserved or a single page update happened.
1640 */
Matt Fleming74256372016-01-29 11:36:10 +00001641 BUG_ON(cpa->numpages > numpages || !cpa->numpages);
Rafael J. Wysocki9b5cf482008-03-03 01:17:37 +01001642 numpages -= cpa->numpages;
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001643 if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
Shaohua Lid75586a2008-08-21 10:46:06 +08001644 cpa->curpage++;
1645 else
1646 *cpa->vaddr += cpa->numpages * PAGE_SIZE;
1647
Thomas Gleixner65e074d2008-02-04 16:48:07 +01001648 }
Thomas Gleixnerff314522008-01-30 13:34:08 +01001649 return 0;
1650}
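/*
 * Loop contract example (illustrative): __change_page_attr() stores the
 * number of pages it actually handled in cpa->numpages, so a 2M-aligned
 * 1030-page request may advance in steps of 512, 512 and then smaller
 * steps for the remaining 6 pages, depending on whether the underlying
 * large pages can be preserved.
 */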
1651
LuckTonyc7486102018-08-31 09:55:06 -07001652/*
1653 * Machine check recovery code needs to change cache mode of poisoned
1654 * pages to UC to avoid speculative access logging another error. But
1655 * passing the address of the 1:1 mapping to set_memory_uc() is a fine
1656 * way to encourage a speculative access. So we cheat and flip the top
1657 * bit of the address. This works fine for the code that updates the
1658 * page tables. But at the end of the process we need to flush the cache
1659 * and the non-canonical address causes a #GP fault when used by the
1660 * CLFLUSH instruction.
1661 *
1662 * But in the common case we already have a canonical address. This code
1663 * will fix the top bit if needed and is a no-op otherwise.
1664 */
1665static inline unsigned long make_addr_canonical_again(unsigned long addr)
1666{
1667#ifdef CONFIG_X86_64
1668 return (long)(addr << 1) >> 1;
1669#else
1670 return addr;
1671#endif
1672}
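/*
 * Worked example (illustrative): on x86_64 the MCE code flips bit 63 of
 * a direct-map address such as 0xffff888000000000, yielding the
 * non-canonical 0x7fff888000000000. Then
 *
 *	addr << 1         == 0xffff110000000000   (bit 63 discarded)
 *	(long)(...) >> 1  == 0xffff888000000000   (bit 62 sign-extended)
 *
 * which is canonical again; an already-canonical address passes through
 * unchanged because bits 63 and 62 were equal to begin with.
 */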
1673
1674
Shaohua Lid75586a2008-08-21 10:46:06 +08001675static int change_page_attr_set_clr(unsigned long *addr, int numpages,
Andi Kleenc9caa022008-03-12 03:53:29 +01001676 pgprot_t mask_set, pgprot_t mask_clr,
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001677 int force_split, int in_flag,
1678 struct page **pages)
Thomas Gleixnerff314522008-01-30 13:34:08 +01001679{
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001680 struct cpa_data cpa;
Ingo Molnarcacf8902008-08-21 13:46:33 +02001681 int ret, cache, checkalias;
Jack Steinerfa526d02009-09-03 12:56:02 -05001682 unsigned long baddr = 0;
Thomas Gleixner331e4062008-02-04 16:48:06 +01001683
Borislav Petkov82f07122013-10-31 17:25:07 +01001684 memset(&cpa, 0, sizeof(cpa));
1685
Thomas Gleixner331e4062008-02-04 16:48:06 +01001686 /*
Dave Hansen39114b72018-04-06 13:55:17 -07001687	 * Check whether we are requested to set an unsupported
1688	 * feature. Clearing unsupported features is OK.
Thomas Gleixner331e4062008-02-04 16:48:06 +01001689 */
1690 mask_set = canon_pgprot(mask_set);
Dave Hansen39114b72018-04-06 13:55:17 -07001691
Andi Kleenc9caa022008-03-12 03:53:29 +01001692 if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
Thomas Gleixner331e4062008-02-04 16:48:06 +01001693 return 0;
1694
Thomas Gleixner69b14152008-02-13 11:04:50 +01001695 /* Ensure we are PAGE_SIZE aligned */
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001696 if (in_flag & CPA_ARRAY) {
Shaohua Lid75586a2008-08-21 10:46:06 +08001697 int i;
1698 for (i = 0; i < numpages; i++) {
1699 if (addr[i] & ~PAGE_MASK) {
1700 addr[i] &= PAGE_MASK;
1701 WARN_ON_ONCE(1);
1702 }
1703 }
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001704 } else if (!(in_flag & CPA_PAGES_ARRAY)) {
1705 /*
1706 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
1707		 * No need to check in that case.
1708 */
1709 if (*addr & ~PAGE_MASK) {
1710 *addr &= PAGE_MASK;
1711 /*
1712 * People should not be passing in unaligned addresses:
1713 */
1714 WARN_ON_ONCE(1);
1715 }
Jack Steinerfa526d02009-09-03 12:56:02 -05001716 /*
1717 * Save address for cache flush. *addr is modified in the call
1718 * to __change_page_attr_set_clr() below.
1719 */
LuckTonyc7486102018-08-31 09:55:06 -07001720 baddr = make_addr_canonical_again(*addr);
Thomas Gleixner69b14152008-02-13 11:04:50 +01001721 }
1722
Nick Piggin5843d9a2008-08-01 03:15:21 +02001723 /* Must avoid aliasing mappings in the highmem code */
1724 kmap_flush_unused();
1725
Nick Piggindb64fe02008-10-18 20:27:03 -07001726 vm_unmap_aliases();
1727
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001728 cpa.vaddr = addr;
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001729 cpa.pages = pages;
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001730 cpa.numpages = numpages;
1731 cpa.mask_set = mask_set;
1732 cpa.mask_clr = mask_clr;
Shaohua Lid75586a2008-08-21 10:46:06 +08001733 cpa.flags = 0;
1734 cpa.curpage = 0;
Andi Kleenc9caa022008-03-12 03:53:29 +01001735 cpa.force_split = force_split;
Thomas Gleixner72e458d2008-02-04 16:48:07 +01001736
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001737 if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
1738 cpa.flags |= in_flag;
Shaohua Lid75586a2008-08-21 10:46:06 +08001739
Thomas Gleixneraf96e442008-02-15 21:49:46 +01001740 /* No alias checking for _NX bit modifications */
1741 checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
Dave Hansenc40a56a2018-08-02 15:58:31 -07001742 /* Has caller explicitly disabled alias checking? */
1743 if (in_flag & CPA_NO_CHECK_ALIAS)
1744 checkalias = 0;
Thomas Gleixneraf96e442008-02-15 21:49:46 +01001745
1746 ret = __change_page_attr_set_clr(&cpa, checkalias);
Thomas Gleixnerff314522008-01-30 13:34:08 +01001747
Thomas Gleixner57a6a462008-01-30 13:34:08 +01001748 /*
Thomas Gleixnerf4ae5da2008-02-04 16:48:07 +01001749 * Check whether we really changed something:
1750 */
Shaohua Lid75586a2008-08-21 10:46:06 +08001751 if (!(cpa.flags & CPA_FLUSHTLB))
Shaohua Li1ac2f7d2008-08-04 14:51:24 +08001752 goto out;
Ingo Molnarcacf8902008-08-21 13:46:33 +02001753
Thomas Gleixnerf4ae5da2008-02-04 16:48:07 +01001754 /*
Andi Kleen6bb83832008-02-04 16:48:06 +01001755	 * No need to flush when we did not set any of the caching
1756 * attributes:
1757 */
Juergen Grossc06814d2014-11-03 14:01:57 +01001758 cache = !!pgprot2cachemode(mask_set);
Andi Kleen6bb83832008-02-04 16:48:06 +01001759
1760 /*
Peter Zijlstrafce2ce92018-09-19 10:50:22 +02001761	 * On error, flush everything to be sure.
Thomas Gleixner57a6a462008-01-30 13:34:08 +01001762 */
Peter Zijlstrafce2ce92018-09-19 10:50:22 +02001763 if (ret) {
Andi Kleen6bb83832008-02-04 16:48:06 +01001764 cpa_flush_all(cache);
Peter Zijlstrafce2ce92018-09-19 10:50:22 +02001765 goto out;
1766 }
1767
1768 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
1769 cpa_flush_array(baddr, addr, numpages, cache,
1770 cpa.flags, pages);
1771 } else {
1772 cpa_flush_range(baddr, numpages, cache);
1773 }
Ingo Molnarcacf8902008-08-21 13:46:33 +02001774
Thomas Gleixner76ebd052008-02-09 23:24:09 +01001775out:
Thomas Gleixnerff314522008-01-30 13:34:08 +01001776 return ret;
1777}
1778
Shaohua Lid75586a2008-08-21 10:46:06 +08001779static inline int change_page_attr_set(unsigned long *addr, int numpages,
1780 pgprot_t mask, int array)
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001781{
Shaohua Lid75586a2008-08-21 10:46:06 +08001782 return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001783 (array ? CPA_ARRAY : 0), NULL);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001784}
1785
Shaohua Lid75586a2008-08-21 10:46:06 +08001786static inline int change_page_attr_clear(unsigned long *addr, int numpages,
1787 pgprot_t mask, int array)
Thomas Gleixner72932c72008-01-30 13:34:08 +01001788{
Shaohua Lid75586a2008-08-21 10:46:06 +08001789 return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07001790 (array ? CPA_ARRAY : 0), NULL);
Thomas Gleixner72932c72008-01-30 13:34:08 +01001791}
1792
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07001793static inline int cpa_set_pages_array(struct page **pages, int numpages,
1794 pgprot_t mask)
1795{
1796 return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
1797 CPA_PAGES_ARRAY, pages);
1798}
1799
1800static inline int cpa_clear_pages_array(struct page **pages, int numpages,
1801 pgprot_t mask)
1802{
1803 return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
1804 CPA_PAGES_ARRAY, pages);
1805}
1806
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001807int _set_memory_uc(unsigned long addr, int numpages)
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001808{
Suresh Siddhade33c442008-04-25 17:07:22 -07001809 /*
1810	 * For now UC MINUS; see comments in ioremap_nocache().
Luis R. Rodrigueze4b6be332015-05-11 10:15:53 +02001811 * If you really need strong UC use ioremap_uc(), but note
1812 * that you cannot override IO areas with set_memory_*() as
1813 * these helpers cannot work with IO memory.
Suresh Siddhade33c442008-04-25 17:07:22 -07001814 */
Shaohua Lid75586a2008-08-21 10:46:06 +08001815 return change_page_attr_set(&addr, numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01001816 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1817 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001818}
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001819
1820int set_memory_uc(unsigned long addr, int numpages)
1821{
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001822 int ret;
1823
Suresh Siddhade33c442008-04-25 17:07:22 -07001824 /*
1825	 * For now UC MINUS; see comments in ioremap_nocache().
1826 */
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001827 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
Juergen Grosse00c8cc2014-11-03 14:01:59 +01001828 _PAGE_CACHE_MODE_UC_MINUS, NULL);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001829 if (ret)
1830 goto out_err;
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001831
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001832 ret = _set_memory_uc(addr, numpages);
1833 if (ret)
1834 goto out_free;
1835
1836 return 0;
1837
1838out_free:
1839 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1840out_err:
1841 return ret;
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001842}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001843EXPORT_SYMBOL(set_memory_uc);
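/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * temporarily marking a kernel buffer uncached. 'buf' and 'npages' are
 * hypothetical; set_memory_wb() undoes both the attribute change and the
 * memtype reservation taken by set_memory_uc().
 */
static int __maybe_unused example_with_uncached(void *buf, int npages)
{
	int ret = set_memory_uc((unsigned long)buf, npages);

	if (ret)
		return ret;

	/* ... access the buffer through its now-UC- mapping ... */

	return set_memory_wb((unsigned long)buf, npages);
}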
1844
H Hartley Sweeten2d070ef2011-11-15 14:49:00 -08001845static int _set_memory_array(unsigned long *addr, int addrinarray,
Juergen Grossc06814d2014-11-03 14:01:57 +01001846 enum page_cache_mode new_type)
Shaohua Lid75586a2008-08-21 10:46:06 +08001847{
Toshi Kani623dffb2015-06-04 18:55:20 +02001848 enum page_cache_mode set_type;
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001849 int i, j;
1850 int ret;
1851
Shaohua Lid75586a2008-08-21 10:46:06 +08001852 for (i = 0; i < addrinarray; i++) {
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001853 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
Pauli Nieminen4f646252010-04-01 12:45:01 +00001854 new_type, NULL);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001855 if (ret)
1856 goto out_free;
Shaohua Lid75586a2008-08-21 10:46:06 +08001857 }
1858
Toshi Kani623dffb2015-06-04 18:55:20 +02001859 /* If WC, set to UC- first and then WC */
1860 set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
1861 _PAGE_CACHE_MODE_UC_MINUS : new_type;
1862
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001863 ret = change_page_attr_set(addr, addrinarray,
Toshi Kani623dffb2015-06-04 18:55:20 +02001864 cachemode2pgprot(set_type), 1);
Pauli Nieminen4f646252010-04-01 12:45:01 +00001865
Juergen Grossc06814d2014-11-03 14:01:57 +01001866 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
Pauli Nieminen4f646252010-04-01 12:45:01 +00001867 ret = change_page_attr_set_clr(addr, addrinarray,
Juergen Grossc06814d2014-11-03 14:01:57 +01001868 cachemode2pgprot(
1869 _PAGE_CACHE_MODE_WC),
Pauli Nieminen4f646252010-04-01 12:45:01 +00001870 __pgprot(_PAGE_CACHE_MASK),
1871 0, CPA_ARRAY, NULL);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001872 if (ret)
1873 goto out_free;
Rene Hermanc5e147c2008-08-22 01:02:20 +02001874
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001875 return 0;
1876
1877out_free:
1878 for (j = 0; j < i; j++)
1879 free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
1880
1881 return ret;
Shaohua Lid75586a2008-08-21 10:46:06 +08001882}
Pauli Nieminen4f646252010-04-01 12:45:01 +00001883
1884int set_memory_array_uc(unsigned long *addr, int addrinarray)
1885{
Juergen Grossc06814d2014-11-03 14:01:57 +01001886 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
Pauli Nieminen4f646252010-04-01 12:45:01 +00001887}
Shaohua Lid75586a2008-08-21 10:46:06 +08001888EXPORT_SYMBOL(set_memory_array_uc);
1889
Pauli Nieminen4f646252010-04-01 12:45:01 +00001890int set_memory_array_wc(unsigned long *addr, int addrinarray)
1891{
Juergen Grossc06814d2014-11-03 14:01:57 +01001892 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
Pauli Nieminen4f646252010-04-01 12:45:01 +00001893}
1894EXPORT_SYMBOL(set_memory_array_wc);
1895
Toshi Kani623dffb2015-06-04 18:55:20 +02001896int set_memory_array_wt(unsigned long *addr, int addrinarray)
1897{
1898 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT);
1899}
1900EXPORT_SYMBOL_GPL(set_memory_array_wt);
1901
venkatesh.pallipadi@intel.comef354af2008-03-18 17:00:23 -07001902int _set_memory_wc(unsigned long addr, int numpages)
1903{
venkatesh.pallipadi@intel.com3869c4a2009-04-09 14:26:50 -07001904 int ret;
Pallipadi, Venkateshbdc63402009-07-30 14:43:19 -07001905 unsigned long addr_copy = addr;
1906
venkatesh.pallipadi@intel.com3869c4a2009-04-09 14:26:50 -07001907 ret = change_page_attr_set(&addr, numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01001908 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1909 0);
venkatesh.pallipadi@intel.com3869c4a2009-04-09 14:26:50 -07001910 if (!ret) {
Pallipadi, Venkateshbdc63402009-07-30 14:43:19 -07001911 ret = change_page_attr_set_clr(&addr_copy, numpages,
Juergen Grossc06814d2014-11-03 14:01:57 +01001912 cachemode2pgprot(
1913 _PAGE_CACHE_MODE_WC),
Pallipadi, Venkateshbdc63402009-07-30 14:43:19 -07001914 __pgprot(_PAGE_CACHE_MASK),
1915 0, 0, NULL);
venkatesh.pallipadi@intel.com3869c4a2009-04-09 14:26:50 -07001916 }
1917 return ret;
venkatesh.pallipadi@intel.comef354af2008-03-18 17:00:23 -07001918}
1919
1920int set_memory_wc(unsigned long addr, int numpages)
1921{
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001922 int ret;
1923
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001924 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
Juergen Grosse00c8cc2014-11-03 14:01:59 +01001925 _PAGE_CACHE_MODE_WC, NULL);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001926 if (ret)
Toshi Kani623dffb2015-06-04 18:55:20 +02001927 return ret;
venkatesh.pallipadi@intel.comef354af2008-03-18 17:00:23 -07001928
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001929 ret = _set_memory_wc(addr, numpages);
1930 if (ret)
Toshi Kani623dffb2015-06-04 18:55:20 +02001931 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001932
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001933 return ret;
venkatesh.pallipadi@intel.comef354af2008-03-18 17:00:23 -07001934}
1935EXPORT_SYMBOL(set_memory_wc);
1936
Toshi Kani623dffb2015-06-04 18:55:20 +02001937int _set_memory_wt(unsigned long addr, int numpages)
1938{
1939 return change_page_attr_set(&addr, numpages,
1940 cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
1941}
1942
1943int set_memory_wt(unsigned long addr, int numpages)
1944{
1945 int ret;
1946
1947 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1948 _PAGE_CACHE_MODE_WT, NULL);
1949 if (ret)
1950 return ret;
1951
1952 ret = _set_memory_wt(addr, numpages);
1953 if (ret)
1954 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1955
1956 return ret;
1957}
1958EXPORT_SYMBOL_GPL(set_memory_wt);
1959
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001960int _set_memory_wb(unsigned long addr, int numpages)
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001961{
Juergen Grossc06814d2014-11-03 14:01:57 +01001962 /* WB cache mode is hard wired to all cache attribute bits being 0 */
Shaohua Lid75586a2008-08-21 10:46:06 +08001963 return change_page_attr_clear(&addr, numpages,
1964 __pgprot(_PAGE_CACHE_MASK), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001965}
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001966
1967int set_memory_wb(unsigned long addr, int numpages)
1968{
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001969 int ret;
1970
1971 ret = _set_memory_wb(addr, numpages);
1972 if (ret)
1973 return ret;
1974
venkatesh.pallipadi@intel.comc15238d2008-08-20 16:45:51 -07001975 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001976 return 0;
venkatesh.pallipadi@intel.com12193332008-03-18 17:00:18 -07001977}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001978EXPORT_SYMBOL(set_memory_wb);
1979
Shaohua Lid75586a2008-08-21 10:46:06 +08001980int set_memory_array_wb(unsigned long *addr, int addrinarray)
1981{
1982 int i;
venkatesh.pallipadi@intel.coma5593e02009-04-09 14:26:48 -07001983 int ret;
1984
Juergen Grossc06814d2014-11-03 14:01:57 +01001985 /* WB cache mode is hard wired to all cache attribute bits being 0 */
venkatesh.pallipadi@intel.coma5593e02009-04-09 14:26:48 -07001986 ret = change_page_attr_clear(addr, addrinarray,
1987 __pgprot(_PAGE_CACHE_MASK), 1);
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001988 if (ret)
1989 return ret;
Shaohua Lid75586a2008-08-21 10:46:06 +08001990
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001991 for (i = 0; i < addrinarray; i++)
1992 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
Rene Hermanc5e147c2008-08-22 01:02:20 +02001993
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07001994 return 0;
Shaohua Lid75586a2008-08-21 10:46:06 +08001995}
1996EXPORT_SYMBOL(set_memory_array_wb);
1997
Arjan van de Ven75cbade2008-01-30 13:34:06 +01001998int set_memory_x(unsigned long addr, int numpages)
1999{
H. Peter Anvin583140a2009-11-13 15:28:15 -08002000 if (!(__supported_pte_mask & _PAGE_NX))
2001 return 0;
2002
Shaohua Lid75586a2008-08-21 10:46:06 +08002003 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002004}
2005EXPORT_SYMBOL(set_memory_x);
2006
2007int set_memory_nx(unsigned long addr, int numpages)
2008{
H. Peter Anvin583140a2009-11-13 15:28:15 -08002009 if (!(__supported_pte_mask & _PAGE_NX))
2010 return 0;
2011
Shaohua Lid75586a2008-08-21 10:46:06 +08002012 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002013}
2014EXPORT_SYMBOL(set_memory_nx);
2015
2016int set_memory_ro(unsigned long addr, int numpages)
2017{
Shaohua Lid75586a2008-08-21 10:46:06 +08002018 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002019}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002020
2021int set_memory_rw(unsigned long addr, int numpages)
2022{
Shaohua Lid75586a2008-08-21 10:46:06 +08002023 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002024}
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002025
2026int set_memory_np(unsigned long addr, int numpages)
2027{
Shaohua Lid75586a2008-08-21 10:46:06 +08002028 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002029}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002030
Dave Hansenc40a56a2018-08-02 15:58:31 -07002031int set_memory_np_noalias(unsigned long addr, int numpages)
2032{
2033 int cpa_flags = CPA_NO_CHECK_ALIAS;
2034
2035 return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
2036 __pgprot(_PAGE_PRESENT), 0,
2037 cpa_flags, NULL);
2038}
2039
Andi Kleenc9caa022008-03-12 03:53:29 +01002040int set_memory_4k(unsigned long addr, int numpages)
2041{
Shaohua Lid75586a2008-08-21 10:46:06 +08002042 return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
venkatesh.pallipadi@intel.com9ae28472009-03-19 14:51:14 -07002043 __pgprot(0), 1, 0, NULL);
Andi Kleenc9caa022008-03-12 03:53:29 +01002044}
2045
Dave Hansen39114b72018-04-06 13:55:17 -07002046int set_memory_nonglobal(unsigned long addr, int numpages)
2047{
2048 return change_page_attr_clear(&addr, numpages,
2049 __pgprot(_PAGE_GLOBAL), 0);
2050}
2051
Dave Hanseneac70732018-08-02 15:58:25 -07002052int set_memory_global(unsigned long addr, int numpages)
2053{
2054 return change_page_attr_set(&addr, numpages,
2055 __pgprot(_PAGE_GLOBAL), 0);
2056}
2057
Tom Lendacky77bd2342017-07-17 16:10:19 -05002058static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
2059{
2060 struct cpa_data cpa;
2061 unsigned long start;
2062 int ret;
2063
Tom Lendackya72ec5a2017-10-20 09:30:48 -05002064 /* Nothing to do if memory encryption is not active */
2065 if (!mem_encrypt_active())
Tom Lendacky77bd2342017-07-17 16:10:19 -05002066 return 0;
2067
2068 /* Should not be working on unaligned addresses */
2069 if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
2070 addr &= PAGE_MASK;
2071
2072 start = addr;
2073
2074 memset(&cpa, 0, sizeof(cpa));
2075 cpa.vaddr = &addr;
2076 cpa.numpages = numpages;
2077 cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
2078 cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
2079 cpa.pgd = init_mm.pgd;
2080
2081 /* Must avoid aliasing mappings in the highmem code */
2082 kmap_flush_unused();
2083 vm_unmap_aliases();
2084
2085 /*
2086 * Before changing the encryption attribute, we need to flush caches.
2087 */
Peter Zijlstra5f464b32018-09-19 10:50:21 +02002088 cpa_flush_range(start, numpages, 1);
Tom Lendacky77bd2342017-07-17 16:10:19 -05002089
2090 ret = __change_page_attr_set_clr(&cpa, 1);
2091
2092 /*
2093 * After changing the encryption attribute, we need to flush TLBs
2094 * again in case any speculative TLB caching occurred (but no need
2095 * to flush caches again). We could just use cpa_flush_all(), but
2096 * in case TLB flushing gets optimized in the cpa_flush_range()
2097	 * path, use the same logic as above.
2098 */
Peter Zijlstra5f464b32018-09-19 10:50:21 +02002099 cpa_flush_range(start, numpages, 0);
Tom Lendacky77bd2342017-07-17 16:10:19 -05002100
2101 return ret;
2102}
2103
2104int set_memory_encrypted(unsigned long addr, int numpages)
2105{
2106 return __set_memory_enc_dec(addr, numpages, true);
2107}
Tom Lendacky95cf9262017-07-17 16:10:26 -05002108EXPORT_SYMBOL_GPL(set_memory_encrypted);
Tom Lendacky77bd2342017-07-17 16:10:19 -05002109
2110int set_memory_decrypted(unsigned long addr, int numpages)
2111{
2112 return __set_memory_enc_dec(addr, numpages, false);
2113}
Tom Lendacky95cf9262017-07-17 16:10:26 -05002114EXPORT_SYMBOL_GPL(set_memory_decrypted);
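/*
 * Usage sketch (illustrative only): under SME/SEV a driver shares a
 * buffer with a device or hypervisor by clearing the C bit, and restores
 * encryption when done. The function and variable names are hypothetical.
 */
static int __maybe_unused example_share_buffer(void *buf, int npages)
{
	int ret = set_memory_decrypted((unsigned long)buf, npages);

	if (ret)
		return ret;

	/* ... the buffer is now accessed unencrypted ... */

	return set_memory_encrypted((unsigned long)buf, npages);
}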
Tom Lendacky77bd2342017-07-17 16:10:19 -05002115
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002116int set_pages_uc(struct page *page, int numpages)
2117{
2118 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002119
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002120 return set_memory_uc(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002121}
2122EXPORT_SYMBOL(set_pages_uc);
2123
Pauli Nieminen4f646252010-04-01 12:45:01 +00002124static int _set_pages_array(struct page **pages, int addrinarray,
Juergen Grossc06814d2014-11-03 14:01:57 +01002125 enum page_cache_mode new_type)
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002126{
2127 unsigned long start;
2128 unsigned long end;
Toshi Kani623dffb2015-06-04 18:55:20 +02002129 enum page_cache_mode set_type;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002130 int i;
2131 int free_idx;
Pauli Nieminen4f646252010-04-01 12:45:01 +00002132 int ret;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002133
2134 for (i = 0; i < addrinarray; i++) {
Thomas Hellstrom8523acf2009-08-03 09:25:45 +02002135 if (PageHighMem(pages[i]))
2136 continue;
2137 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002138 end = start + PAGE_SIZE;
Pauli Nieminen4f646252010-04-01 12:45:01 +00002139 if (reserve_memtype(start, end, new_type, NULL))
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002140 goto err_out;
2141 }
2142
Toshi Kani623dffb2015-06-04 18:55:20 +02002143 /* If WC, set to UC- first and then WC */
2144 set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
2145 _PAGE_CACHE_MODE_UC_MINUS : new_type;
2146
Pauli Nieminen4f646252010-04-01 12:45:01 +00002147 ret = cpa_set_pages_array(pages, addrinarray,
Toshi Kani623dffb2015-06-04 18:55:20 +02002148 cachemode2pgprot(set_type));
Juergen Grossc06814d2014-11-03 14:01:57 +01002149 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
Pauli Nieminen4f646252010-04-01 12:45:01 +00002150 ret = change_page_attr_set_clr(NULL, addrinarray,
Juergen Grossc06814d2014-11-03 14:01:57 +01002151 cachemode2pgprot(
2152 _PAGE_CACHE_MODE_WC),
Pauli Nieminen4f646252010-04-01 12:45:01 +00002153 __pgprot(_PAGE_CACHE_MASK),
2154 0, CPA_PAGES_ARRAY, pages);
2155 if (ret)
2156 goto err_out;
2157 return 0; /* Success */
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002158err_out:
2159 free_idx = i;
2160 for (i = 0; i < free_idx; i++) {
Thomas Hellstrom8523acf2009-08-03 09:25:45 +02002161 if (PageHighMem(pages[i]))
2162 continue;
2163 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002164 end = start + PAGE_SIZE;
2165 free_memtype(start, end);
2166 }
2167 return -EINVAL;
2168}
Pauli Nieminen4f646252010-04-01 12:45:01 +00002169
2170int set_pages_array_uc(struct page **pages, int addrinarray)
2171{
Juergen Grossc06814d2014-11-03 14:01:57 +01002172 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
Pauli Nieminen4f646252010-04-01 12:45:01 +00002173}
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002174EXPORT_SYMBOL(set_pages_array_uc);
2175
Pauli Nieminen4f646252010-04-01 12:45:01 +00002176int set_pages_array_wc(struct page **pages, int addrinarray)
2177{
Juergen Grossc06814d2014-11-03 14:01:57 +01002178 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
Pauli Nieminen4f646252010-04-01 12:45:01 +00002179}
2180EXPORT_SYMBOL(set_pages_array_wc);
2181
Toshi Kani623dffb2015-06-04 18:55:20 +02002182int set_pages_array_wt(struct page **pages, int addrinarray)
2183{
2184 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT);
2185}
2186EXPORT_SYMBOL_GPL(set_pages_array_wt);
2187
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002188int set_pages_wb(struct page *page, int numpages)
2189{
2190 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002191
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002192 return set_memory_wb(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002193}
2194EXPORT_SYMBOL(set_pages_wb);
2195
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002196int set_pages_array_wb(struct page **pages, int addrinarray)
2197{
2198 int retval;
2199 unsigned long start;
2200 unsigned long end;
2201 int i;
2202
Juergen Grossc06814d2014-11-03 14:01:57 +01002203 /* WB cache mode is hard wired to all cache attribute bits being 0 */
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002204 retval = cpa_clear_pages_array(pages, addrinarray,
2205 __pgprot(_PAGE_CACHE_MASK));
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07002206 if (retval)
2207 return retval;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002208
2209 for (i = 0; i < addrinarray; i++) {
Thomas Hellstrom8523acf2009-08-03 09:25:45 +02002210 if (PageHighMem(pages[i]))
2211 continue;
2212 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002213 end = start + PAGE_SIZE;
2214 free_memtype(start, end);
2215 }
2216
venkatesh.pallipadi@intel.com9fa3ab32009-04-09 14:26:49 -07002217 return 0;
venkatesh.pallipadi@intel.com0f350752009-03-19 14:51:15 -07002218}
2219EXPORT_SYMBOL(set_pages_array_wb);
2220
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002221int set_pages_x(struct page *page, int numpages)
2222{
2223 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002224
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002225 return set_memory_x(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002226}
2227EXPORT_SYMBOL(set_pages_x);
2228
2229int set_pages_nx(struct page *page, int numpages)
2230{
2231 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002232
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002233 return set_memory_nx(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002234}
2235EXPORT_SYMBOL(set_pages_nx);
2236
2237int set_pages_ro(struct page *page, int numpages)
2238{
2239 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002240
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002241 return set_memory_ro(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002242}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002243
2244int set_pages_rw(struct page *page, int numpages)
2245{
2246 unsigned long addr = (unsigned long)page_address(page);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002247
Thomas Gleixnerd7c8f212008-01-30 13:34:07 +01002248 return set_memory_rw(addr, numpages);
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002249}
Arjan van de Ven75cbade2008-01-30 13:34:06 +01002250
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251#ifdef CONFIG_DEBUG_PAGEALLOC
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002252
2253static int __set_pages_p(struct page *page, int numpages)
2254{
Shaohua Lid75586a2008-08-21 10:46:06 +08002255 unsigned long tempaddr = (unsigned long) page_address(page);
2256 struct cpa_data cpa = { .vaddr = &tempaddr,
Borislav Petkov82f07122013-10-31 17:25:07 +01002257 .pgd = NULL,
Thomas Gleixner72e458d2008-02-04 16:48:07 +01002258 .numpages = numpages,
2259 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
Shaohua Lid75586a2008-08-21 10:46:06 +08002260 .mask_clr = __pgprot(0),
2261 .flags = 0};
Thomas Gleixner72932c72008-01-30 13:34:08 +01002262
Suresh Siddha55121b42008-09-23 14:00:40 -07002263 /*
2264	 * No alias checking needed for setting the present flag; otherwise,
2265 * we may need to break large pages for 64-bit kernel text
2266 * mappings (this adds to complexity if we want to do this from
2267 * atomic context especially). Let's keep it simple!
2268 */
2269 return __change_page_attr_set_clr(&cpa, 0);
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002270}
2271
2272static int __set_pages_np(struct page *page, int numpages)
2273{
Shaohua Lid75586a2008-08-21 10:46:06 +08002274 unsigned long tempaddr = (unsigned long) page_address(page);
2275 struct cpa_data cpa = { .vaddr = &tempaddr,
Borislav Petkov82f07122013-10-31 17:25:07 +01002276 .pgd = NULL,
Thomas Gleixner72e458d2008-02-04 16:48:07 +01002277 .numpages = numpages,
2278 .mask_set = __pgprot(0),
Shaohua Lid75586a2008-08-21 10:46:06 +08002279 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2280 .flags = 0};
Thomas Gleixner72932c72008-01-30 13:34:08 +01002281
Suresh Siddha55121b42008-09-23 14:00:40 -07002282 /*
2283	 * No alias checking needed for setting the not-present flag; otherwise,
2284 * we may need to break large pages for 64-bit kernel text
2285 * mappings (this adds to complexity if we want to do this from
2286 * atomic context especially). Let's keep it simple!
2287 */
2288 return __change_page_attr_set_clr(&cpa, 0);
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002289}
2290
Joonsoo Kim031bc572014-12-12 16:55:52 -08002291void __kernel_map_pages(struct page *page, int numpages, int enable)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292{
2293 if (PageHighMem(page))
2294 return;
Ingo Molnar9f4c8152008-01-30 13:33:41 +01002295 if (!enable) {
Ingo Molnarf9b84042006-06-27 02:54:49 -07002296 debug_check_no_locks_freed(page_address(page),
2297 numpages * PAGE_SIZE);
Ingo Molnar9f4c8152008-01-30 13:33:41 +01002298 }
Ingo Molnarde5097c2006-01-09 15:59:21 -08002299
Ingo Molnar9f4c8152008-01-30 13:33:41 +01002300 /*
Ingo Molnarf8d84062008-02-13 14:09:53 +01002301 * The return value is ignored as the calls cannot fail.
Suresh Siddha55121b42008-09-23 14:00:40 -07002302 * Large pages for identity mappings are not used at boot time
2303	 * and hence no memory allocations occur during large page splits.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 */
Ingo Molnarf62d0f02008-01-30 13:34:07 +01002305 if (enable)
2306 __set_pages_p(page, numpages);
2307 else
2308 __set_pages_np(page, numpages);
Ingo Molnar9f4c8152008-01-30 13:33:41 +01002309
2310 /*
Ingo Molnare4b71dc2008-01-30 13:34:04 +01002311	 * We should perform an IPI and flush all TLBs,
Sebastian Andrzej Siewiorf77084d2018-10-17 12:34:32 +02002312	 * but that can deadlock, so flush only the current CPU.
2313 * Preemption needs to be disabled around __flush_tlb_all() due to
2314 * CR3 reload in __native_flush_tlb().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 */
Sebastian Andrzej Siewiorf77084d2018-10-17 12:34:32 +02002316 preempt_disable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 __flush_tlb_all();
Sebastian Andrzej Siewiorf77084d2018-10-17 12:34:32 +02002318 preempt_enable();
Boris Ostrovsky26564602013-04-11 13:59:52 -04002319
2320 arch_flush_lazy_mmu_mode();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321}
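/*
 * Illustrative effect (not part of the original source): with
 * CONFIG_DEBUG_PAGEALLOC, the page allocator calls this with enable == 0
 * when pages are freed, clearing _PAGE_PRESENT so that a use-after-free
 * faults immediately instead of silently corrupting memory.
 */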
Rafael J. Wysocki8a235ef2008-02-20 01:47:44 +01002322
2323#ifdef CONFIG_HIBERNATION
2324
2325bool kernel_page_present(struct page *page)
2326{
2327 unsigned int level;
2328 pte_t *pte;
2329
2330 if (PageHighMem(page))
2331 return false;
2332
2333 pte = lookup_address((unsigned long)page_address(page), &level);
2334 return (pte_val(*pte) & _PAGE_PRESENT);
2335}
2336
2337#endif /* CONFIG_HIBERNATION */
2338
2339#endif /* CONFIG_DEBUG_PAGEALLOC */
Arjan van de Vend1028a12008-01-30 13:34:07 +01002340
Borislav Petkov82f07122013-10-31 17:25:07 +01002341int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
2342 unsigned numpages, unsigned long page_flags)
2343{
2344 int retval = -EINVAL;
2345
2346 struct cpa_data cpa = {
2347 .vaddr = &address,
2348 .pfn = pfn,
2349 .pgd = pgd,
2350 .numpages = numpages,
2351 .mask_set = __pgprot(0),
2352 .mask_clr = __pgprot(0),
2353 .flags = 0,
2354 };
2355
2356 if (!(__supported_pte_mask & _PAGE_NX))
2357 goto out;
2358
2359 if (!(page_flags & _PAGE_NX))
2360 cpa.mask_clr = __pgprot(_PAGE_NX);
2361
Sai Praneeth15f003d2016-02-17 12:36:04 +00002362 if (!(page_flags & _PAGE_RW))
2363 cpa.mask_clr = __pgprot(_PAGE_RW);
2364
Tom Lendacky21729f82017-07-17 16:10:07 -05002365 if (!(page_flags & _PAGE_ENC))
2366 cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
2367
Borislav Petkov82f07122013-10-31 17:25:07 +01002368 cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
2369
2370 retval = __change_page_attr_set_clr(&cpa, 0);
2371 __flush_tlb_all();
2372
2373out:
2374 return retval;
2375}
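/*
 * Usage sketch (illustrative): the EFI code is the main caller, mapping
 * runtime regions into its private page table along the lines of
 *
 *	kernel_map_pages_in_pgd(efi_pgd, md->phys_addr >> PAGE_SHIFT,
 *				md->virt_addr, md->num_pages, flags);
 *
 * where efi_pgd, md and flags stand in for the EFI memory-descriptor
 * handling in arch/x86/platform/efi.
 */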
2376
Arjan van de Vend1028a12008-01-30 13:34:07 +01002377/*
2378 * The testcases use internal knowledge of the implementation that shouldn't
2379 * be exposed to the rest of the kernel. Include these directly here.
2380 */
2381#ifdef CONFIG_CPA_DEBUG
2382#include "pageattr-test.c"
2383#endif