/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>
#include <asm/set_memory.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long *vaddr;
	pgd_t *pgd;
	pgprot_t mask_set;
	pgprot_t mask_clr;
	unsigned long numpages;
	int flags;
	unsigned long pfn;
	unsigned force_split : 1,
		 force_static_prot : 1;
	int curpage;
	struct page **pages;
};

enum cpa_warn {
	CPA_CONFLICT,
	CPA_PROTECT,
	CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity
 * mappings) with cpa_lock, so that no CPU with stale large-page TLB
 * entries can change page attributes in parallel while another CPU is
 * splitting a large page entry and changing its attributes.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

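/*
 * When a large page is split, one level-N entry is replaced by a full
 * page table of PTRS_PER_PTE next-level entries; adjust the
 * direct-mapping counters accordingly.
 */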
static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

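/*
 * The shifts below convert page counts to kB: a 4K page is 4 kB
 * (<< 2), a 2M page is 2048 kB (<< 11), a 4M page is 4096 kB (<< 12)
 * and a 1G page is 1048576 kB (<< 20).
 */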
void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G: %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
	cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
	cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
	cpa_4k_install++;
}

static inline void cpa_inc_lp_sameprot(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_sameprot++;
	else
		cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_preserved++;
	else
		cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
	seq_printf(m, "1G pages checked: %16lu\n", cpa_1g_checked);
	seq_printf(m, "1G pages sameprot: %16lu\n", cpa_1g_sameprot);
	seq_printf(m, "1G pages preserved: %16lu\n", cpa_1g_preserved);
	seq_printf(m, "2M pages checked: %16lu\n", cpa_2m_checked);
	seq_printf(m, "2M pages sameprot: %16lu\n", cpa_2m_sameprot);
	seq_printf(m, "2M pages preserved: %16lu\n", cpa_2m_preserved);
	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
	return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
	.open		= cpastats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init cpa_stats_init(void)
{
	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
			    &cpastats_fops);
	return 0;
}
late_initcall(cpa_stats_init);
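/*
 * With debugfs mounted at its usual location, the statistics above are
 * readable from /sys/kernel/debug/x86/cpa_stats.
 */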
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif

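/*
 * within() treats 'end' as exclusive, within_inclusive() as inclusive;
 * the inclusive variant is used for pfn ranges where 'end' is the last
 * valid pfn (see __cpa_pfn_in_highmap() below).
 */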
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}

#endif

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	mb();

	for (; p < vend; p += clflush_size)
		clflushopt(p);

	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
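/*
 * Illustrative use (not from this file): after the CPU writes to a
 * buffer that a non-snooping device will read from memory, flush the
 * covered cache lines so the data is visible:
 *
 *	memcpy(buf, data, len);
 *	clflush_cache_range(buf, len);
 *
 * The function rounds the start down to a cache line boundary and
 * fences before and after the loop, so callers need no extra barriers.
 */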

void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large-page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

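/*
 * Returns true when flushing is complete and the caller is done: either
 * the all-CPU fallback (full TLB flush, plus wbinvd when caching
 * attributes changed) handled everything, or no cache flush was
 * requested and the ranged TLB flush alone suffices. Returns false
 * when the caller must still clflush the affected lines itself.
 */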
static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	WARN_ON(PAGE_ALIGN(start) != start);

	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return true;
	}

	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);

	return !cache;
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	if (__cpa_flush_range(start, numpages, cache))
		return;

	/*
	 * We only need to flush on one CPU: clflush is a MESI-coherent
	 * instruction that will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

static void cpa_flush_array(unsigned long baddr, unsigned long *start,
			    int numpages, int cache,
			    int in_flags, struct page **pages)
{
	unsigned int i, level;

	if (__cpa_flush_range(baddr, numpages, cache))
		return;

	/*
	 * We only need to flush on one CPU: clflush is a MESI-coherent
	 * instruction that will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0; i < numpages; i++) {
		unsigned long addr;
		pte_t *pte;

		if (in_flags & CPA_PAGES_ARRAY)
			addr = (unsigned long)page_address(pages[i]);
		else
			addr = start[i];

		pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *)addr, PAGE_SIZE);
	}
}

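/*
 * Both range ends are inclusive: e.g. overlaps(0x10, 0x1f, 0x1f, 0x2f)
 * is true because the two ranges share the single value 0x1f.
 */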
static bool overlaps(unsigned long r1_start, unsigned long r1_end,
		     unsigned long r2_start, unsigned long r2_end)
{
	return (r1_start <= r2_end && r1_end >= r2_start) ||
		(r2_start <= r1_end && r2_end >= r1_start);
}

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640K and 1MB needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
		return _PAGE_NX;
	return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases. This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

	/*
	 * Note: __end_rodata is page aligned and not inclusive, so
	 * subtract 1 to get the last enforced PFN in the rodata area.
	 */
	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
		return _PAGE_RW;
	return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
 * out of which the kernel actually executes. Do not protect the low
 * mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
	unsigned long t_end = (unsigned long)_etext - 1;
	unsigned long t_start = (unsigned long)_text;

	if (overlaps(start, end, t_start, t_end))
		return _PAGE_NX;
	return 0;
}

| 429 | #if defined(CONFIG_X86_64) |
| 430 | /* |
| 431 | * Once the kernel maps the text as RO (kernel_set_to_readonly is set), |
| 432 | * kernel text mappings for the large page aligned text, rodata sections |
| 433 | * will be always read-only. For the kernel identity mappings covering the |
| 434 | * holes caused by this alignment can be anything that user asks. |
| 435 | * |
| 436 | * This will preserve the large page mappings for kernel text/data at no |
| 437 | * extra cost. |
| 438 | */ |
Thomas Gleixner | 91ee8f5 | 2018-09-17 16:29:10 +0200 | [diff] [blame] | 439 | static pgprotval_t protect_kernel_text_ro(unsigned long start, |
| 440 | unsigned long end) |
Thomas Gleixner | afd7969 | 2018-09-17 16:29:09 +0200 | [diff] [blame] | 441 | { |
Thomas Gleixner | 91ee8f5 | 2018-09-17 16:29:10 +0200 | [diff] [blame] | 442 | unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1; |
| 443 | unsigned long t_start = (unsigned long)_text; |
Thomas Gleixner | afd7969 | 2018-09-17 16:29:09 +0200 | [diff] [blame] | 444 | unsigned int level; |
| 445 | |
Thomas Gleixner | 91ee8f5 | 2018-09-17 16:29:10 +0200 | [diff] [blame] | 446 | if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end)) |
Thomas Gleixner | afd7969 | 2018-09-17 16:29:09 +0200 | [diff] [blame] | 447 | return 0; |
| 448 | /* |
| 449 | * Don't enforce the !RW mapping for the kernel text mapping, if |
| 450 | * the current mapping is already using small page mapping. No |
| 451 | * need to work hard to preserve large page mappings in this case. |
| 452 | * |
| 453 | * This also fixes the Linux Xen paravirt guest boot failure caused |
| 454 | * by unexpected read-only mappings for kernel identity |
| 455 | * mappings. In this paravirt guest case, the kernel text mapping |
| 456 | * and the kernel identity mapping share the same page-table pages, |
| 457 | * so the protections for kernel text and identity mappings have to |
| 458 | * be the same. |
| 459 | */ |
Thomas Gleixner | 91ee8f5 | 2018-09-17 16:29:10 +0200 | [diff] [blame] | 460 | if (lookup_address(start, &level) && (level != PG_LEVEL_4K)) |
Thomas Gleixner | afd7969 | 2018-09-17 16:29:09 +0200 | [diff] [blame] | 461 | return _PAGE_RW; |
| 462 | return 0; |
| 463 | } |
| 464 | #else |
Thomas Gleixner | 91ee8f5 | 2018-09-17 16:29:10 +0200 | [diff] [blame] | 465 | static pgprotval_t protect_kernel_text_ro(unsigned long start, |
| 466 | unsigned long end) |
Thomas Gleixner | afd7969 | 2018-09-17 16:29:09 +0200 | [diff] [blame] | 467 | { |
| 468 | return 0; |
| 469 | } |
| 470 | #endif |

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
				  unsigned long start, unsigned long end,
				  unsigned long pfn, const char *txt)
{
	static const char *lvltxt[] = {
		[CPA_CONFLICT]	= "conflict",
		[CPA_PROTECT]	= "protect",
		[CPA_DETECT]	= "detect",
	};

	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
		return;

	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
		(unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon), so this function
 * checks for and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
					  unsigned long pfn, unsigned long npg,
					  int warnlvl)
{
	pgprotval_t forbidden, res;
	unsigned long end;

	/*
	 * There is no point in checking RW/NX conflicts when the requested
	 * mapping is setting the page !PRESENT.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		return prot;

	/* Operate on the virtual address */
	end = start + npg * PAGE_SIZE - 1;

	res = protect_kernel_text(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
	forbidden = res;

	res = protect_kernel_text_ro(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
	forbidden |= res;

	/* Check the PFN directly */
	res = protect_pci_bios(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
	forbidden |= res;

	res = protect_rodata(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
	forbidden |= res;

	return __pgprot(pgprot_val(prot) & ~forbidden);
}
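/*
 * Example (illustrative): a request to set _PAGE_NX on a range that
 * overlaps the high kernel text mapping comes back with _PAGE_NX
 * filtered out, since protect_kernel_text() adds it to the forbidden
 * mask; the caller's remaining attribute changes still apply.
 */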

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level)
{
	if (cpa->pgd)
		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
					     address, level);

	return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make the 32-bit PAE kernel work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
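/*
 * Illustrative use: for a vmalloc()'d buffer, __pa(addr) would compute
 * garbage because vmalloc addresses are outside the direct map, while
 * slow_virt_to_phys(addr) walks the page tables and returns the real
 * physical address of the backing page.
 */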

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}
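/*
 * (On x86, _PAGE_PROTNONE and _PAGE_GLOBAL occupy the same bit
 * position, which is why the bit must be cleared when a PTE becomes
 * non-present.)
 */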

static int __should_split_large_page(pte_t *kpte, unsigned long address,
				     struct cpa_data *cpa)
{
	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
	pgprot_t old_prot, new_prot, req_prot, chk_prot;
	pte_t new_pte, old_pte, *tmp;
	enum pg_level level;

	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte)
		return 1;

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		cpa_inc_2m_checked();
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		cpa_inc_1g_checked();
		break;
	default:
		return -EINVAL;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);

	/*
	 * Calculate the number of pages which fit into this large
	 * page starting at address:
	 */
	lpaddr = (address + psize) & pmask;
	numpages = (lpaddr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
	 */
	old_pte = *kpte;
	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
	req_prot = pgprot_large_2_4k(old_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);
	req_prot = pgprot_clear_protnone_bits(req_prot);
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
		pgprot_val(req_prot) |= _PAGE_PSE;

	/*
	 * old_pfn points to the large page base pfn. So we need to add the
	 * offset of the virtual address:
	 */
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	/*
	 * Calculate the large page base address and the number of 4K pages
	 * in the large page
	 */
	lpaddr = address & pmask;
	numpages = psize >> PAGE_SHIFT;

	/*
	 * Sanity check that the existing mapping is correct versus the static
	 * protections. static_protections() guards against !PRESENT, so no
	 * extra conditional required here.
	 */
	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
				      CPA_CONFLICT);

	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
		/*
		 * Split the large page and tell the split code to
		 * enforce static protections.
		 */
		cpa->force_static_prot = 1;
		return 1;
	}

	/*
	 * Optimization: If the requested pgprot is the same as the current
	 * pgprot, then the large page can be preserved and no updates are
	 * required independent of alignment and length of the requested
	 * range. The above already established that the current pgprot is
	 * correct, which in consequence makes the requested pgprot correct
	 * as well if it is the same. The static protection scan below will
	 * not come to a different conclusion.
	 */
	if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
		cpa_inc_lp_sameprot(level);
		return 0;
	}

	/*
	 * If the requested range does not cover the full page, split it up
	 */
	if (address != lpaddr || cpa->numpages != numpages)
		return 1;

	/*
	 * Check whether the requested pgprot is conflicting with a static
	 * protection requirement in the large page.
	 */
	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
				      CPA_DETECT);

	/*
	 * If there is a conflict, split the large page.
	 *
	 * There used to be a 4k-wise evaluation trying really hard to
	 * preserve the large pages, but experimentation has shown that this
	 * does not help at all. There might be corner cases which would
	 * preserve one large page occasionally, but it's really not worth the
	 * extra code and cycles for the common case.
	 */
	if (pgprot_val(req_prot) != pgprot_val(new_prot))
		return 1;

	/* All checks passed. Update the large page mapping. */
	new_pte = pfn_pte(old_pfn, new_prot);
	__set_pmd_pte(kpte, address, new_pte);
	cpa->flags |= CPA_FLUSHTLB;
	cpa_inc_lp_preserved(level);
	return 0;
}

static int should_split_large_page(pte_t *kpte, unsigned long address,
				   struct cpa_data *cpa)
{
	int do_split;

	if (cpa->force_split)
		return 1;

	spin_lock(&pgd_lock);
	do_split = __should_split_large_page(kpte, address, cpa);
	spin_unlock(&pgd_lock);

	return do_split;
}

static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
			  pgprot_t ref_prot, unsigned long address,
			  unsigned long size)
{
	unsigned int npg = PFN_DOWN(size);
	pgprot_t prot;

	/*
	 * If should_split_large_page() discovered an inconsistent mapping,
	 * remove the invalid protection in the split mapping.
	 */
	if (!cpa->force_static_prot)
		goto set;

	prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);

	if (pgprot_val(prot) == pgprot_val(ref_prot))
		goto set;

	/*
	 * If this is splitting a PMD, fix it up. PUD splits cannot be
	 * fixed trivially as that would require rescanning the newly
	 * installed PMD mappings after returning from split_large_page()
	 * so an eventual further split can allocate the necessary PTE
	 * pages. Warn for now and revisit it in case this actually
	 * happens.
	 */
	if (size == PAGE_SIZE)
		ref_prot = prot;
	else
		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
set:
	set_pte(pte, pfn_pte(pfn, ref_prot));
}
| 913 | |
Borislav Petkov | 5952886 | 2013-03-21 18:16:57 +0100 | [diff] [blame] | 914 | static int |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 915 | __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, |
| 916 | struct page *base) |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 917 | { |
Thomas Gleixner | f61c5ba | 2018-09-17 16:29:14 +0200 | [diff] [blame] | 918 | unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1; |
Borislav Petkov | 5952886 | 2013-03-21 18:16:57 +0100 | [diff] [blame] | 919 | pte_t *pbase = (pte_t *)page_address(base); |
Ingo Molnar | 86f0398 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 920 | unsigned int i, level; |
Ingo Molnar | 9df8499 | 2008-02-04 16:48:09 +0100 | [diff] [blame] | 921 | pgprot_t ref_prot; |
Thomas Gleixner | f61c5ba | 2018-09-17 16:29:14 +0200 | [diff] [blame] | 922 | pte_t *tmp; |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 923 | |
Andrea Arcangeli | a79e53d | 2011-02-16 15:45:22 -0800 | [diff] [blame] | 924 | spin_lock(&pgd_lock); |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 925 | /* |
| 926 | * Check for races, another CPU might have split this page |
| 927 | * up for us already: |
| 928 | */ |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 929 | tmp = _lookup_address_cpa(cpa, address, &level); |
Wen Congyang | ae9aae9 | 2013-02-22 16:33:04 -0800 | [diff] [blame] | 930 | if (tmp != kpte) { |
| 931 | spin_unlock(&pgd_lock); |
| 932 | return 1; |
| 933 | } |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 934 | |
Jeremy Fitzhardinge | 6944a9c | 2008-03-17 16:37:01 -0700 | [diff] [blame] | 935 | paravirt_alloc_pte(&init_mm, page_to_pfn(base)); |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 936 | |
Toshi Kani | d551aaa | 2015-09-17 12:24:23 -0600 | [diff] [blame] | 937 | switch (level) { |
| 938 | case PG_LEVEL_2M: |
| 939 | ref_prot = pmd_pgprot(*(pmd_t *)kpte); |
Dave Hansen | 606c719 | 2018-04-06 13:55:04 -0700 | [diff] [blame] | 940 | /* |
| 941 | * Clear PSE (aka _PAGE_PAT) and move |
| 942 | * PAT bit to correct position. |
| 943 | */ |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 944 | ref_prot = pgprot_large_2_4k(ref_prot); |
Toshi Kani | d551aaa | 2015-09-17 12:24:23 -0600 | [diff] [blame] | 945 | ref_pfn = pmd_pfn(*(pmd_t *)kpte); |
Thomas Gleixner | f61c5ba | 2018-09-17 16:29:14 +0200 | [diff] [blame] | 946 | lpaddr = address & PMD_MASK; |
| 947 | lpinc = PAGE_SIZE; |
Toshi Kani | d551aaa | 2015-09-17 12:24:23 -0600 | [diff] [blame] | 948 | break; |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 949 | |
Toshi Kani | d551aaa | 2015-09-17 12:24:23 -0600 | [diff] [blame] | 950 | case PG_LEVEL_1G: |
| 951 | ref_prot = pud_pgprot(*(pud_t *)kpte); |
| 952 | ref_pfn = pud_pfn(*(pud_t *)kpte); |
Andi Kleen | f07333f | 2008-02-04 16:48:09 +0100 | [diff] [blame] | 953 | pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; |
Thomas Gleixner | f61c5ba | 2018-09-17 16:29:14 +0200 | [diff] [blame] | 954 | lpaddr = address & PUD_MASK; |
| 955 | lpinc = PMD_SIZE; |
Andrea Arcangeli | a8aed3e | 2013-02-22 15:11:51 -0800 | [diff] [blame] | 956 | /* |
Toshi Kani | d551aaa | 2015-09-17 12:24:23 -0600 | [diff] [blame] | 957 | * Clear the PSE flag if the PRESENT flag is not set;
Andrea Arcangeli | a8aed3e | 2013-02-22 15:11:51 -0800 | [diff] [blame] | 958 | * otherwise pmd_present()/pmd_huge() will return true
| 959 | * even on a non-present pmd.
| 960 | */ |
Toshi Kani | d551aaa | 2015-09-17 12:24:23 -0600 | [diff] [blame] | 961 | if (!(pgprot_val(ref_prot) & _PAGE_PRESENT)) |
Andrea Arcangeli | a8aed3e | 2013-02-22 15:11:51 -0800 | [diff] [blame] | 962 | pgprot_val(ref_prot) &= ~_PAGE_PSE; |
Toshi Kani | d551aaa | 2015-09-17 12:24:23 -0600 | [diff] [blame] | 963 | break; |
| 964 | |
| 965 | default: |
| 966 | spin_unlock(&pgd_lock); |
| 967 | return 1; |
Andi Kleen | f07333f | 2008-02-04 16:48:09 +0100 | [diff] [blame] | 968 | } |
Andi Kleen | f07333f | 2008-02-04 16:48:09 +0100 | [diff] [blame] | 969 | |
Dave Hansen | d1440b2 | 2018-04-06 13:55:02 -0700 | [diff] [blame] | 970 | ref_prot = pgprot_clear_protnone_bits(ref_prot); |
Andrea Arcangeli | a8aed3e | 2013-02-22 15:11:51 -0800 | [diff] [blame] | 971 | |
| 972 | /* |
Thomas Gleixner | 63c1dcf | 2008-02-04 16:48:05 +0100 | [diff] [blame] | 973 | * Get the target pfn from the original entry: |
| 974 | */ |
Toshi Kani | d551aaa | 2015-09-17 12:24:23 -0600 | [diff] [blame] | 975 | pfn = ref_pfn; |
Thomas Gleixner | f61c5ba | 2018-09-17 16:29:14 +0200 | [diff] [blame] | 976 | for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc) |
| 977 | split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc); |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 978 | |
Sai Praneeth | 2c66e24d | 2015-10-16 16:20:27 -0700 | [diff] [blame] | 979 | if (virt_addr_valid(address)) { |
| 980 | unsigned long pfn = PFN_DOWN(__pa(address)); |
| 981 | |
| 982 | if (pfn_range_is_mapped(pfn, pfn + 1)) |
| 983 | split_page_count(level); |
| 984 | } |
Yinghai Lu | f361a45 | 2008-07-10 20:38:26 -0700 | [diff] [blame] | 985 | |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 986 | /* |
Ingo Molnar | 07a66d7 | 2009-02-20 08:04:13 +0100 | [diff] [blame] | 987 | * Install the new, split-up page table.
Huang, Ying | 4c881ca | 2008-01-30 13:34:04 +0100 | [diff] [blame] | 988 | *
Ingo Molnar | 07a66d7 | 2009-02-20 08:04:13 +0100 | [diff] [blame] | 989 | * We use the standard kernel page table protections for the new
| 990 | * page table; the actual PTEs set above control the
| 991 | * primary protection behavior:
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 992 | */ |
Ingo Molnar | 07a66d7 | 2009-02-20 08:04:13 +0100 | [diff] [blame] | 993 | __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE))); |
Ingo Molnar | 211b3d0 | 2009-03-10 22:31:03 +0100 | [diff] [blame] | 994 | |
| 995 | /* |
Peter Zijlstra | c0a759a | 2018-09-19 10:50:18 +0200 | [diff] [blame] | 996 | * Do a global TLB flush after splitting the large page
| 997 | * and before we do the actual page attribute change in the PTE.
Ingo Molnar | 211b3d0 | 2009-03-10 22:31:03 +0100 | [diff] [blame] | 998 | * |
Peter Zijlstra | c0a759a | 2018-09-19 10:50:18 +0200 | [diff] [blame] | 999 | * Without this, we violate the TLB application note, that says: |
| 1000 | * "The TLBs may contain both ordinary and large-page |
| 1001 | * translations for a 4-KByte range of linear addresses. This |
| 1002 | * may occur if software modifies the paging structures so that |
| 1003 | * the page size used for the address range changes. If the two |
| 1004 | * translations differ with respect to page frame or attributes |
| 1005 | * (e.g., permissions), processor behavior is undefined and may |
| 1006 | * be implementation-specific." |
| 1007 | * |
| 1008 | * We do this global TLB flush inside the cpa_lock, so that we
| 1009 | * don't allow any other CPU with stale TLB entries to change, in
| 1010 | * parallel, the attributes of a page that also falls into the
| 1011 | * just-split large page entry.
Ingo Molnar | 211b3d0 | 2009-03-10 22:31:03 +0100 | [diff] [blame] | 1012 | */ |
Peter Zijlstra | c0a759a | 2018-09-19 10:50:18 +0200 | [diff] [blame] | 1013 | flush_tlb_all(); |
Andrea Arcangeli | a79e53d | 2011-02-16 15:45:22 -0800 | [diff] [blame] | 1014 | spin_unlock(&pgd_lock); |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 1015 | |
Ingo Molnar | bb5c2db | 2008-01-30 13:33:56 +0100 | [diff] [blame] | 1016 | return 0; |
| 1017 | } |
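| | /*
| | * For illustration, the geometry of the split loop above:
| | *
| | *	PG_LEVEL_2M split: 512 PTEs, pfninc == 1, lpinc == PAGE_SIZE
| | *	PG_LEVEL_1G split: 512 PMDs, pfninc == PMD_PAGE_SIZE >> PAGE_SHIFT
| | *			   (i.e. 512), lpinc == PMD_SIZE
| | *
| | * so entry i of the new page table starts at pfn ref_pfn + i * pfninc
| | * and at large-page address (address & {PMD,PUD}_MASK) + i * lpinc.
| | */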
| 1018 | |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1019 | static int split_large_page(struct cpa_data *cpa, pte_t *kpte, |
| 1020 | unsigned long address) |
Wen Congyang | ae9aae9 | 2013-02-22 16:33:04 -0800 | [diff] [blame] | 1021 | { |
Wen Congyang | ae9aae9 | 2013-02-22 16:33:04 -0800 | [diff] [blame] | 1022 | struct page *base; |
| 1023 | |
Christian Borntraeger | 288cf3c | 2016-03-15 14:57:33 -0700 | [diff] [blame] | 1024 | if (!debug_pagealloc_enabled()) |
Wen Congyang | ae9aae9 | 2013-02-22 16:33:04 -0800 | [diff] [blame] | 1025 | spin_unlock(&cpa_lock); |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 1026 | base = alloc_pages(GFP_KERNEL, 0); |
Christian Borntraeger | 288cf3c | 2016-03-15 14:57:33 -0700 | [diff] [blame] | 1027 | if (!debug_pagealloc_enabled()) |
Wen Congyang | ae9aae9 | 2013-02-22 16:33:04 -0800 | [diff] [blame] | 1028 | spin_lock(&cpa_lock); |
| 1029 | if (!base) |
| 1030 | return -ENOMEM; |
| 1031 | |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1032 | if (__split_large_page(cpa, kpte, address, base)) |
Wen Congyang | ae9aae9 | 2013-02-22 16:33:04 -0800 | [diff] [blame] | 1033 | __free_page(base); |
| 1034 | |
| 1035 | return 0; |
| 1036 | } |
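| | /*
| | * Presumably cpa_lock is dropped around the allocation above because
| | * a GFP_KERNEL allocation may sleep, which is not allowed under a
| | * spinlock. With debug_pagealloc enabled the lock is not taken at
| | * all (see __change_page_attr_set_clr() below), so there is nothing
| | * to drop or retake.
| | */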
| 1037 | |
Borislav Petkov | 52a628f | 2013-10-31 17:25:06 +0100 | [diff] [blame] | 1038 | static bool try_to_free_pte_page(pte_t *pte) |
| 1039 | { |
| 1040 | int i; |
| 1041 | |
| 1042 | for (i = 0; i < PTRS_PER_PTE; i++) |
| 1043 | if (!pte_none(pte[i])) |
| 1044 | return false; |
| 1045 | |
| 1046 | free_page((unsigned long)pte); |
| 1047 | return true; |
| 1048 | } |
| 1049 | |
| 1050 | static bool try_to_free_pmd_page(pmd_t *pmd) |
| 1051 | { |
| 1052 | int i; |
| 1053 | |
| 1054 | for (i = 0; i < PTRS_PER_PMD; i++) |
| 1055 | if (!pmd_none(pmd[i])) |
| 1056 | return false; |
| 1057 | |
| 1058 | free_page((unsigned long)pmd); |
| 1059 | return true; |
| 1060 | } |
| 1061 | |
| 1062 | static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) |
| 1063 | { |
| 1064 | pte_t *pte = pte_offset_kernel(pmd, start); |
| 1065 | |
| 1066 | while (start < end) { |
| 1067 | set_pte(pte, __pte(0)); |
| 1068 | |
| 1069 | start += PAGE_SIZE; |
| 1070 | pte++; |
| 1071 | } |
| 1072 | |
| 1073 | if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) { |
| 1074 | pmd_clear(pmd); |
| 1075 | return true; |
| 1076 | } |
| 1077 | return false; |
| 1078 | } |
| 1079 | |
| 1080 | static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, |
| 1081 | unsigned long start, unsigned long end) |
| 1082 | { |
| 1083 | if (unmap_pte_range(pmd, start, end)) |
| 1084 | if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) |
| 1085 | pud_clear(pud); |
| 1086 | } |
| 1087 | |
| 1088 | static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) |
| 1089 | { |
| 1090 | pmd_t *pmd = pmd_offset(pud, start); |
| 1091 | |
| 1092 | /* |
| 1093 | * Not on a 2MB page boundary? |
| 1094 | */ |
| 1095 | if (start & (PMD_SIZE - 1)) { |
| 1096 | unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; |
| 1097 | unsigned long pre_end = min_t(unsigned long, end, next_page); |
| 1098 | |
| 1099 | __unmap_pmd_range(pud, pmd, start, pre_end); |
| 1100 | |
| 1101 | start = pre_end; |
| 1102 | pmd++; |
| 1103 | } |
| 1104 | |
| 1105 | /* |
| 1106 | * Try to unmap in 2M chunks. |
| 1107 | */ |
| 1108 | while (end - start >= PMD_SIZE) { |
| 1109 | if (pmd_large(*pmd)) |
| 1110 | pmd_clear(pmd); |
| 1111 | else |
| 1112 | __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); |
| 1113 | |
| 1114 | start += PMD_SIZE; |
| 1115 | pmd++; |
| 1116 | } |
| 1117 | |
| 1118 | /* |
| 1119 | * 4K leftovers? |
| 1120 | */ |
| 1121 | if (start < end) |
| 1122 | return __unmap_pmd_range(pud, pmd, start, end); |
| 1123 | |
| 1124 | /* |
| 1125 | * Try again to free the PMD page if we haven't succeeded above.
| 1126 | */ |
| 1127 | if (!pud_none(*pud)) |
| 1128 | if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) |
| 1129 | pud_clear(pud); |
| 1130 | } |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1131 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1132 | static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end) |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1133 | { |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1134 | pud_t *pud = pud_offset(p4d, start); |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1135 | |
| 1136 | /* |
| 1137 | * Not on a GB page boundary? |
| 1138 | */ |
| 1139 | if (start & (PUD_SIZE - 1)) { |
| 1140 | unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; |
| 1141 | unsigned long pre_end = min_t(unsigned long, end, next_page); |
| 1142 | |
| 1143 | unmap_pmd_range(pud, start, pre_end); |
| 1144 | |
| 1145 | start = pre_end; |
| 1146 | pud++; |
| 1147 | } |
| 1148 | |
| 1149 | /* |
| 1150 | * Try to unmap in 1G chunks.
| 1151 | */ |
| 1152 | while (end - start >= PUD_SIZE) { |
| 1153 | |
| 1154 | if (pud_large(*pud)) |
| 1155 | pud_clear(pud); |
| 1156 | else |
| 1157 | unmap_pmd_range(pud, start, start + PUD_SIZE); |
| 1158 | |
| 1159 | start += PUD_SIZE; |
| 1160 | pud++; |
| 1161 | } |
| 1162 | |
| 1163 | /* |
| 1164 | * 2M leftovers? |
| 1165 | */ |
| 1166 | if (start < end) |
| 1167 | unmap_pmd_range(pud, start, end); |
| 1168 | |
| 1169 | /* |
| 1170 | * No need to try to free the PUD page because we'll free it in |
| 1171 | * populate_pgd's error path.
| 1172 | */ |
| 1173 | } |
| 1174 | |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1175 | static int alloc_pte_page(pmd_t *pmd) |
| 1176 | { |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 1177 | pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL); |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1178 | if (!pte) |
| 1179 | return -1; |
| 1180 | |
| 1181 | set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); |
| 1182 | return 0; |
| 1183 | } |
| 1184 | |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1185 | static int alloc_pmd_page(pud_t *pud) |
| 1186 | { |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 1187 | pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1188 | if (!pmd) |
| 1189 | return -1; |
| 1190 | |
| 1191 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); |
| 1192 | return 0; |
| 1193 | } |
| 1194 | |
Borislav Petkov | c6b6f36 | 2013-10-31 17:25:04 +0100 | [diff] [blame] | 1195 | static void populate_pte(struct cpa_data *cpa, |
| 1196 | unsigned long start, unsigned long end, |
| 1197 | unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) |
| 1198 | { |
| 1199 | pte_t *pte; |
| 1200 | |
| 1201 | pte = pte_offset_kernel(pmd, start); |
| 1202 | |
Dave Hansen | d1440b2 | 2018-04-06 13:55:02 -0700 | [diff] [blame] | 1203 | pgprot = pgprot_clear_protnone_bits(pgprot); |
Sai Praneeth | 397630150 | 2016-02-17 12:35:56 +0000 | [diff] [blame] | 1204 | |
Borislav Petkov | c6b6f36 | 2013-10-31 17:25:04 +0100 | [diff] [blame] | 1205 | while (num_pages-- && start < end) { |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 1206 | set_pte(pte, pfn_pte(cpa->pfn, pgprot)); |
Borislav Petkov | c6b6f36 | 2013-10-31 17:25:04 +0100 | [diff] [blame] | 1207 | |
| 1208 | start += PAGE_SIZE; |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 1209 | cpa->pfn++; |
Borislav Petkov | c6b6f36 | 2013-10-31 17:25:04 +0100 | [diff] [blame] | 1210 | pte++; |
| 1211 | } |
| 1212 | } |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1213 | |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1214 | static long populate_pmd(struct cpa_data *cpa, |
| 1215 | unsigned long start, unsigned long end, |
| 1216 | unsigned num_pages, pud_t *pud, pgprot_t pgprot) |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1217 | { |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1218 | long cur_pages = 0; |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1219 | pmd_t *pmd; |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 1220 | pgprot_t pmd_pgprot; |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1221 | |
| 1222 | /* |
| 1223 | * Not on a 2M boundary? |
| 1224 | */ |
| 1225 | if (start & (PMD_SIZE - 1)) { |
| 1226 | unsigned long pre_end = start + (num_pages << PAGE_SHIFT); |
| 1227 | unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; |
| 1228 | |
| 1229 | pre_end = min_t(unsigned long, pre_end, next_page); |
| 1230 | cur_pages = (pre_end - start) >> PAGE_SHIFT; |
| 1231 | cur_pages = min_t(unsigned int, num_pages, cur_pages); |
| 1232 | |
| 1233 | /* |
| 1234 | * Need a PTE page? |
| 1235 | */ |
| 1236 | pmd = pmd_offset(pud, start); |
| 1237 | if (pmd_none(*pmd)) |
| 1238 | if (alloc_pte_page(pmd)) |
| 1239 | return -1; |
| 1240 | |
| 1241 | populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); |
| 1242 | |
| 1243 | start = pre_end; |
| 1244 | } |
| 1245 | |
| 1246 | /* |
| 1247 | * We mapped them all? |
| 1248 | */ |
| 1249 | if (num_pages == cur_pages) |
| 1250 | return cur_pages; |
| 1251 | |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 1252 | pmd_pgprot = pgprot_4k_2_large(pgprot); |
| 1253 | |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1254 | while (end - start >= PMD_SIZE) { |
| 1255 | |
| 1256 | /* |
| 1257 | * We cannot use a 1G page so allocate a PMD page if needed. |
| 1258 | */ |
| 1259 | if (pud_none(*pud)) |
| 1260 | if (alloc_pmd_page(pud)) |
| 1261 | return -1; |
| 1262 | |
| 1263 | pmd = pmd_offset(pud, start); |
| 1264 | |
Andi Kleen | 958f79b | 2018-08-07 15:09:39 -0700 | [diff] [blame] | 1265 | set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn, |
| 1266 | canon_pgprot(pmd_pgprot)))); |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1267 | |
| 1268 | start += PMD_SIZE; |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 1269 | cpa->pfn += PMD_SIZE >> PAGE_SHIFT; |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1270 | cur_pages += PMD_SIZE >> PAGE_SHIFT; |
| 1271 | } |
| 1272 | |
| 1273 | /* |
| 1274 | * Map trailing 4K pages. |
| 1275 | */ |
| 1276 | if (start < end) { |
| 1277 | pmd = pmd_offset(pud, start); |
| 1278 | if (pmd_none(*pmd)) |
| 1279 | if (alloc_pte_page(pmd)) |
| 1280 | return -1; |
| 1281 | |
| 1282 | populate_pte(cpa, start, end, num_pages - cur_pages, |
| 1283 | pmd, pgprot); |
| 1284 | } |
| 1285 | return num_pages; |
| 1286 | } |
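| | /*
| | * A worked example with illustrative numbers: populating 515 pages
| | * starting at 0x201ff000. The unaligned head maps one 4K page up to
| | * the 2M boundary at 0x20200000, the loop then maps one 2M page
| | * (512 pages), and the remaining two pages are mapped with trailing
| | * 4K PTEs: 515 == 1 + 512 + 2.
| | */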
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1287 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1288 | static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d, |
| 1289 | pgprot_t pgprot) |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1290 | { |
| 1291 | pud_t *pud; |
| 1292 | unsigned long end; |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1293 | long cur_pages = 0; |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 1294 | pgprot_t pud_pgprot; |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1295 | |
| 1296 | end = start + (cpa->numpages << PAGE_SHIFT); |
| 1297 | |
| 1298 | /* |
| 1299 | * Not on a GB page boundary? => map everything up to it with
| 1300 | * smaller pages. |
| 1301 | */ |
| 1302 | if (start & (PUD_SIZE - 1)) { |
| 1303 | unsigned long pre_end; |
| 1304 | unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; |
| 1305 | |
| 1306 | pre_end = min_t(unsigned long, end, next_page); |
| 1307 | cur_pages = (pre_end - start) >> PAGE_SHIFT; |
| 1308 | cur_pages = min_t(int, (int)cpa->numpages, cur_pages); |
| 1309 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1310 | pud = pud_offset(p4d, start); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1311 | |
| 1312 | /* |
| 1313 | * Need a PMD page? |
| 1314 | */ |
| 1315 | if (pud_none(*pud)) |
| 1316 | if (alloc_pmd_page(pud)) |
| 1317 | return -1; |
| 1318 | |
| 1319 | cur_pages = populate_pmd(cpa, start, pre_end, cur_pages, |
| 1320 | pud, pgprot); |
| 1321 | if (cur_pages < 0) |
| 1322 | return cur_pages; |
| 1323 | |
| 1324 | start = pre_end; |
| 1325 | } |
| 1326 | |
| 1327 | /* We mapped them all? */ |
| 1328 | if (cpa->numpages == cur_pages) |
| 1329 | return cur_pages; |
| 1330 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1331 | pud = pud_offset(p4d, start); |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 1332 | pud_pgprot = pgprot_4k_2_large(pgprot); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1333 | |
| 1334 | /* |
| 1335 | * Map everything starting from the GB boundary, possibly with 1G pages.
| 1336 | */ |
Borislav Petkov | b8291adc | 2016-03-29 17:41:58 +0200 | [diff] [blame] | 1337 | while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { |
Andi Kleen | 958f79b | 2018-08-07 15:09:39 -0700 | [diff] [blame] | 1338 | set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn, |
| 1339 | canon_pgprot(pud_pgprot)))); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1340 | |
| 1341 | start += PUD_SIZE; |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 1342 | cpa->pfn += PUD_SIZE >> PAGE_SHIFT; |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1343 | cur_pages += PUD_SIZE >> PAGE_SHIFT; |
| 1344 | pud++; |
| 1345 | } |
| 1346 | |
| 1347 | /* Map trailing leftover */ |
| 1348 | if (start < end) { |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1349 | long tmp; |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1350 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1351 | pud = pud_offset(p4d, start); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1352 | if (pud_none(*pud)) |
| 1353 | if (alloc_pmd_page(pud)) |
| 1354 | return -1; |
| 1355 | |
| 1356 | tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages, |
| 1357 | pud, pgprot); |
| 1358 | if (tmp < 0) |
| 1359 | return cur_pages; |
| 1360 | |
| 1361 | cur_pages += tmp; |
| 1362 | } |
| 1363 | return cur_pages; |
| 1364 | } |
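| | /*
| | * Note that 1G leaf pages are only used when the CPU advertises
| | * X86_FEATURE_GBPAGES; otherwise the loop above never executes and
| | * the whole remaining range is handed to populate_pmd(), which maps
| | * it with 2M and 4K pages.
| | */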
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1365 | |
| 1366 | /* |
| 1367 | * Restrictions for the kernel page table do not necessarily apply when
| 1368 | * mapping into an alternate PGD.
| 1369 | */ |
| 1370 | static int populate_pgd(struct cpa_data *cpa, unsigned long addr) |
| 1371 | { |
| 1372 | pgprot_t pgprot = __pgprot(_KERNPG_TABLE); |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1373 | pud_t *pud = NULL; /* shut up gcc */ |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1374 | p4d_t *p4d; |
Borislav Petkov | 42a5477 | 2014-01-18 12:48:16 +0100 | [diff] [blame] | 1375 | pgd_t *pgd_entry; |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1376 | long ret; |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1377 | |
| 1378 | pgd_entry = cpa->pgd + pgd_index(addr); |
| 1379 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1380 | if (pgd_none(*pgd_entry)) { |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 1381 | p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL); |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1382 | if (!p4d) |
| 1383 | return -1; |
| 1384 | |
| 1385 | set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE)); |
| 1386 | } |
| 1387 | |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1388 | /* |
| 1389 | * Allocate a PUD page and hand it down for mapping. |
| 1390 | */ |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1391 | p4d = p4d_offset(pgd_entry, addr); |
| 1392 | if (p4d_none(*p4d)) { |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 1393 | pud = (pud_t *)get_zeroed_page(GFP_KERNEL); |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1394 | if (!pud) |
| 1395 | return -1; |
Andy Lutomirski | 530dd8d | 2016-07-22 21:58:08 -0700 | [diff] [blame] | 1396 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1397 | set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE)); |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1398 | } |
| 1399 | |
| 1400 | pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr); |
| 1401 | pgprot_val(pgprot) |= pgprot_val(cpa->mask_set); |
| 1402 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1403 | ret = populate_pud(cpa, addr, p4d, pgprot); |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1404 | if (ret < 0) { |
Andy Lutomirski | 55920d3 | 2016-07-23 09:59:28 -0700 | [diff] [blame] | 1405 | /* |
| 1406 | * Leave the PUD page in place in case some other CPU or thread |
| 1407 | * already found it, but remove any useless entries we just |
| 1408 | * added to it. |
| 1409 | */ |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1410 | unmap_pud_range(p4d, addr, |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1411 | addr + (cpa->numpages << PAGE_SHIFT)); |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1412 | return ret; |
| 1413 | } |
Borislav Petkov | 42a5477 | 2014-01-18 12:48:16 +0100 | [diff] [blame] | 1414 | |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1415 | cpa->numpages = ret; |
| 1416 | return 0; |
| 1417 | } |
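| | /*
| | * A minimal sketch of populating an alternate PGD (assuming the EFI
| | * call path noted in __cpa_process_fault() below; fields abridged):
| | *
| | *	struct cpa_data cpa = {
| | *		.vaddr		= &address,
| | *		.pgd		= pgd,		/* e.g. the EFI page table */
| | *		.pfn		= pfn,
| | *		.numpages	= numpages,
| | *		.mask_set	= __pgprot(0),
| | *		.mask_clr	= __pgprot(0),
| | *	};
| | *
| | *	__change_page_attr_set_clr(&cpa, 0);
| | */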
| 1418 | |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1419 | static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, |
| 1420 | int primary) |
| 1421 | { |
Matt Fleming | 7fc8442 | 2016-04-25 21:06:35 +0100 | [diff] [blame] | 1422 | if (cpa->pgd) { |
| 1423 | /* |
| 1424 | * Right now, we only execute this code path when mapping |
| 1425 | * the EFI virtual memory map regions; no other users
| 1426 | * provide a ->pgd value. This may change in the future. |
| 1427 | */ |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1428 | return populate_pgd(cpa, vaddr); |
Matt Fleming | 7fc8442 | 2016-04-25 21:06:35 +0100 | [diff] [blame] | 1429 | } |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1430 | |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1431 | /* |
| 1432 | * Ignore all non-primary paths.
| 1433 | */ |
Jan Beulich | 405e1133 | 2016-02-10 02:03:00 -0700 | [diff] [blame] | 1434 | if (!primary) { |
| 1435 | cpa->numpages = 1; |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1436 | return 0; |
Jan Beulich | 405e1133 | 2016-02-10 02:03:00 -0700 | [diff] [blame] | 1437 | } |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1438 | |
| 1439 | /* |
| 1440 | * Ignore the NULL PTE for the kernel identity mapping, as it is
| 1441 | * expected to have holes.
| 1442 | * Also set numpages to '1', indicating that we processed the CPA request
| 1443 | * for one virtual address page and its pfn. TBD: numpages can be set based
| 1444 | * on the initial value and the level returned by lookup_address(). |
| 1445 | */ |
| 1446 | if (within(vaddr, PAGE_OFFSET, |
| 1447 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) { |
| 1448 | cpa->numpages = 1; |
| 1449 | cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; |
| 1450 | return 0; |
Dave Hansen | 58e65b5 | 2018-04-20 15:20:21 -0700 | [diff] [blame] | 1451 | |
| 1452 | } else if (__cpa_pfn_in_highmap(cpa->pfn)) { |
| 1453 | /* Faults in the highmap are OK, so do not warn: */ |
| 1454 | return -EFAULT; |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1455 | } else { |
| 1456 | WARN(1, KERN_WARNING "CPA: called for zero pte. " |
| 1457 | "vaddr = %lx cpa->vaddr = %lx\n", vaddr, |
| 1458 | *cpa->vaddr); |
| 1459 | |
| 1460 | return -EFAULT; |
| 1461 | } |
| 1462 | } |
| 1463 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1464 | static int __change_page_attr(struct cpa_data *cpa, int primary) |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1465 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1466 | unsigned long address; |
Harvey Harrison | da7bfc5 | 2008-02-09 23:24:08 +0100 | [diff] [blame] | 1467 | int do_split, err; |
| 1468 | unsigned int level; |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1469 | pte_t *kpte, old_pte; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1471 | if (cpa->flags & CPA_PAGES_ARRAY) { |
| 1472 | struct page *page = cpa->pages[cpa->curpage]; |
| 1473 | if (unlikely(PageHighMem(page))) |
| 1474 | return 0; |
| 1475 | address = (unsigned long)page_address(page); |
| 1476 | } else if (cpa->flags & CPA_ARRAY) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1477 | address = cpa->vaddr[cpa->curpage]; |
| 1478 | else |
| 1479 | address = *cpa->vaddr; |
Ingo Molnar | 97f99fe | 2008-01-30 13:33:55 +0100 | [diff] [blame] | 1480 | repeat: |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1481 | kpte = _lookup_address_cpa(cpa, address, &level); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | if (!kpte) |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1483 | return __cpa_process_fault(cpa, address, primary); |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1484 | |
| 1485 | old_pte = *kpte; |
Dave Hansen | dcb32d9 | 2016-07-07 17:19:15 -0700 | [diff] [blame] | 1486 | if (pte_none(old_pte)) |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1487 | return __cpa_process_fault(cpa, address, primary); |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1488 | |
Thomas Gleixner | 30551bb | 2008-01-30 13:34:04 +0100 | [diff] [blame] | 1489 | if (level == PG_LEVEL_4K) { |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1490 | pte_t new_pte; |
Arjan van de Ven | 626c2c9 | 2008-02-04 16:48:05 +0100 | [diff] [blame] | 1491 | pgprot_t new_prot = pte_pgprot(old_pte); |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1492 | unsigned long pfn = pte_pfn(old_pte); |
Thomas Gleixner | a72a08a | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1493 | |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1494 | pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); |
| 1495 | pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); |
Ingo Molnar | 86f0398 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1496 | |
Thomas Gleixner | 5c280cf | 2018-09-17 16:29:12 +0200 | [diff] [blame] | 1497 | cpa_inc_4k_install(); |
Thomas Gleixner | 4046460 | 2018-09-17 16:29:11 +0200 | [diff] [blame] | 1498 | new_prot = static_protections(new_prot, address, pfn, 1, |
| 1499 | CPA_PROTECT); |
Ingo Molnar | 86f0398 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1500 | |
Dave Hansen | d1440b2 | 2018-04-06 13:55:02 -0700 | [diff] [blame] | 1501 | new_prot = pgprot_clear_protnone_bits(new_prot); |
Andrea Arcangeli | a8aed3e | 2013-02-22 15:11:51 -0800 | [diff] [blame] | 1502 | |
| 1503 | /* |
Arjan van de Ven | 626c2c9 | 2008-02-04 16:48:05 +0100 | [diff] [blame] | 1504 | * We need to keep the pfn from the existing PTE;
| 1505 | * after all, we're only going to change its attributes,
| 1506 | * not the memory it points to.
| 1507 | */ |
Dave Hansen | 1a54420 | 2018-04-06 13:55:11 -0700 | [diff] [blame] | 1508 | new_pte = pfn_pte(pfn, new_prot); |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1509 | cpa->pfn = pfn; |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1510 | /* |
| 1511 | * Do we really change anything?
| 1512 | */ |
| 1513 | if (pte_val(old_pte) != pte_val(new_pte)) { |
| 1514 | set_pte_atomic(kpte, new_pte); |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1515 | cpa->flags |= CPA_FLUSHTLB; |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1516 | } |
Rafael J. Wysocki | 9b5cf48 | 2008-03-03 01:17:37 +0100 | [diff] [blame] | 1517 | cpa->numpages = 1; |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1518 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1519 | } |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1520 | |
| 1521 | /* |
| 1522 | * Check whether we can keep the large page intact
| 1523 | * and just change the pte: |
| 1524 | */ |
Thomas Gleixner | 8679de0 | 2018-09-17 16:29:08 +0200 | [diff] [blame] | 1525 | do_split = should_split_large_page(kpte, address, cpa); |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1526 | /* |
| 1527 | * When the range fits into the existing large page, |
Rafael J. Wysocki | 9b5cf48 | 2008-03-03 01:17:37 +0100 | [diff] [blame] | 1528 | * return. cp->numpages and cpa->tlbflush have been updated in |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1529 | * try_large_page: |
| 1530 | */ |
Ingo Molnar | 87f7f8f | 2008-02-04 16:48:10 +0100 | [diff] [blame] | 1531 | if (do_split <= 0) |
| 1532 | return do_split; |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1533 | |
| 1534 | /* |
| 1535 | * We have to split the large page: |
| 1536 | */ |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1537 | err = split_large_page(cpa, kpte, address); |
Peter Zijlstra | c0a759a | 2018-09-19 10:50:18 +0200 | [diff] [blame] | 1538 | if (!err) |
Ingo Molnar | 87f7f8f | 2008-02-04 16:48:10 +0100 | [diff] [blame] | 1539 | goto repeat; |
Ingo Molnar | beaff63 | 2008-02-04 16:48:09 +0100 | [diff] [blame] | 1540 | |
Ingo Molnar | 87f7f8f | 2008-02-04 16:48:10 +0100 | [diff] [blame] | 1541 | return err; |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1542 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1544 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias); |
| 1545 | |
| 1546 | static int cpa_process_alias(struct cpa_data *cpa) |
Ingo Molnar | 44af6c4 | 2008-01-30 13:34:03 +0100 | [diff] [blame] | 1547 | { |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1548 | struct cpa_data alias_cpa; |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1549 | unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT); |
Tejun Heo | e933a73 | 2009-08-14 15:00:53 +0900 | [diff] [blame] | 1550 | unsigned long vaddr; |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1551 | int ret; |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1552 | |
Yinghai Lu | 8eb5779 | 2012-11-16 19:38:49 -0800 | [diff] [blame] | 1553 | if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1)) |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1554 | return 0; |
| 1555 | |
Thomas Gleixner | f34b439 | 2008-02-15 22:17:57 +0100 | [diff] [blame] | 1556 | /* |
| 1557 | * No need to redo this when the primary call already touched
| 1558 | * the direct mapping:
| 1559 | */ |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1560 | if (cpa->flags & CPA_PAGES_ARRAY) { |
| 1561 | struct page *page = cpa->pages[cpa->curpage]; |
| 1562 | if (unlikely(PageHighMem(page))) |
| 1563 | return 0; |
| 1564 | vaddr = (unsigned long)page_address(page); |
| 1565 | } else if (cpa->flags & CPA_ARRAY) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1566 | vaddr = cpa->vaddr[cpa->curpage]; |
| 1567 | else |
| 1568 | vaddr = *cpa->vaddr; |
| 1569 | |
| 1570 | if (!(within(vaddr, PAGE_OFFSET, |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1571 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1572 | |
Thomas Gleixner | f34b439 | 2008-02-15 22:17:57 +0100 | [diff] [blame] | 1573 | alias_cpa = *cpa; |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1574 | alias_cpa.vaddr = &laddr; |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1575 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1576 | |
Thomas Gleixner | f34b439 | 2008-02-15 22:17:57 +0100 | [diff] [blame] | 1577 | ret = __change_page_attr_set_clr(&alias_cpa, 0); |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1578 | if (ret) |
| 1579 | return ret; |
Thomas Gleixner | f34b439 | 2008-02-15 22:17:57 +0100 | [diff] [blame] | 1580 | } |
Ingo Molnar | 44af6c4 | 2008-01-30 13:34:03 +0100 | [diff] [blame] | 1581 | |
Arjan van de Ven | 488fd99 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1582 | #ifdef CONFIG_X86_64 |
Thomas Gleixner | 0879750 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1583 | /* |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1584 | * If the primary call didn't touch the high mapping already |
| 1585 | * and the physical address is inside the kernel map, we need |
Thomas Gleixner | 0879750 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1586 | * to touch the high mapped kernel as well: |
| 1587 | */ |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1588 | if (!within(vaddr, (unsigned long)_text, _brk_end) && |
Dave Hansen | 58e65b5 | 2018-04-20 15:20:21 -0700 | [diff] [blame] | 1589 | __cpa_pfn_in_highmap(cpa->pfn)) { |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1590 | unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + |
| 1591 | __START_KERNEL_map - phys_base; |
| 1592 | alias_cpa = *cpa; |
| 1593 | alias_cpa.vaddr = &temp_cpa_vaddr; |
| 1594 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); |
Thomas Gleixner | 0879750 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1595 | |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1596 | /* |
| 1597 | * The high mapping range is imprecise, so ignore the |
| 1598 | * return value. |
| 1599 | */ |
| 1600 | __change_page_attr_set_clr(&alias_cpa, 0); |
| 1601 | } |
Thomas Gleixner | 0879750 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1602 | #endif |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1603 | |
| 1604 | return 0; |
Ingo Molnar | 44af6c4 | 2008-01-30 13:34:03 +0100 | [diff] [blame] | 1605 | } |
| 1606 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1607 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1608 | { |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1609 | unsigned long numpages = cpa->numpages; |
| 1610 | int ret; |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1611 | |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1612 | while (numpages) { |
| 1613 | /* |
| 1614 | * Store the remaining nr of pages for the large page |
| 1615 | * preservation check. |
| 1616 | */ |
Rafael J. Wysocki | 9b5cf48 | 2008-03-03 01:17:37 +0100 | [diff] [blame] | 1617 | cpa->numpages = numpages; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1618 | /* for array changes, we can't use large pages */
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1619 | if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY)) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1620 | cpa->numpages = 1; |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1621 | |
Christian Borntraeger | 288cf3c | 2016-03-15 14:57:33 -0700 | [diff] [blame] | 1622 | if (!debug_pagealloc_enabled()) |
Suresh Siddha | ad5ca55 | 2008-09-23 14:00:42 -0700 | [diff] [blame] | 1623 | spin_lock(&cpa_lock); |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1624 | ret = __change_page_attr(cpa, checkalias); |
Christian Borntraeger | 288cf3c | 2016-03-15 14:57:33 -0700 | [diff] [blame] | 1625 | if (!debug_pagealloc_enabled()) |
Suresh Siddha | ad5ca55 | 2008-09-23 14:00:42 -0700 | [diff] [blame] | 1626 | spin_unlock(&cpa_lock); |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1627 | if (ret) |
| 1628 | return ret; |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1629 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1630 | if (checkalias) { |
| 1631 | ret = cpa_process_alias(cpa); |
| 1632 | if (ret) |
| 1633 | return ret; |
| 1634 | } |
| 1635 | |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1636 | /* |
| 1637 | * Adjust the number of pages with the result of the |
| 1638 | * CPA operation. Either a large page has been |
| 1639 | * preserved or a single page update happened. |
| 1640 | */ |
Matt Fleming | 7425637 | 2016-01-29 11:36:10 +0000 | [diff] [blame] | 1641 | BUG_ON(cpa->numpages > numpages || !cpa->numpages); |
Rafael J. Wysocki | 9b5cf48 | 2008-03-03 01:17:37 +0100 | [diff] [blame] | 1642 | numpages -= cpa->numpages; |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1643 | if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1644 | cpa->curpage++; |
| 1645 | else |
| 1646 | *cpa->vaddr += cpa->numpages * PAGE_SIZE; |
| 1647 | |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1648 | } |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1649 | return 0; |
| 1650 | } |
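| | /*
| | * For illustration: array-based requests (CPA_ARRAY/CPA_PAGES_ARRAY)
| | * are consumed one page per loop iteration, since cpa->numpages is
| | * forced to 1 above, while a linear-range request may consume an
| | * entire preserved large page in a single iteration.
| | */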
| 1651 | |
LuckTony | c748610 | 2018-08-31 09:55:06 -0700 | [diff] [blame] | 1652 | /* |
| 1653 | * Machine check recovery code needs to change cache mode of poisoned |
| 1654 | * pages to UC to avoid speculative access logging another error. But |
| 1655 | * passing the address of the 1:1 mapping to set_memory_uc() is a fine |
| 1656 | * way to encourage a speculative access. So we cheat and flip the top |
| 1657 | * bit of the address. This works fine for the code that updates the |
| 1658 | * page tables. But at the end of the process we need to flush the cache |
| 1659 | * and the non-canonical address causes a #GP fault when used by the |
| 1660 | * CLFLUSH instruction. |
| 1661 | * |
| 1662 | * But in the common case we already have a canonical address. This code |
| 1663 | * will fix the top bit if needed and is a no-op otherwise. |
| 1664 | */ |
| 1665 | static inline unsigned long make_addr_canonical_again(unsigned long addr) |
| 1666 | { |
| 1667 | #ifdef CONFIG_X86_64 |
| 1668 | return (long)(addr << 1) >> 1; |
| 1669 | #else |
| 1670 | return addr; |
| 1671 | #endif |
| 1672 | } |
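| | /*
| | * For example (illustrative values): a canonical kernel address
| | * 0xffff888012345000 whose top bit was flipped by the machine check
| | * code reads 0x7fff888012345000, and:
| | *
| | *	0x7fff888012345000 << 1		== 0xffff11002468a000
| | *	(long)0xffff11002468a000 >> 1	== 0xffff888012345000
| | *
| | * The arithmetic right shift duplicates bit 62 into bit 63, restoring
| | * the canonical form; an already-canonical address is unchanged since
| | * bits 63 and 62 are equal there.
| | */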
| 1673 | |
| 1674 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1675 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1676 | pgprot_t mask_set, pgprot_t mask_clr, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1677 | int force_split, int in_flag, |
| 1678 | struct page **pages) |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1679 | { |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1680 | struct cpa_data cpa; |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 1681 | int ret, cache, checkalias; |
Jack Steiner | fa526d0 | 2009-09-03 12:56:02 -0500 | [diff] [blame] | 1682 | unsigned long baddr = 0; |
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1683 | |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1684 | memset(&cpa, 0, sizeof(cpa)); |
| 1685 | |
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1686 | /* |
Dave Hansen | 39114b7 | 2018-04-06 13:55:17 -0700 | [diff] [blame] | 1687 | * Check if we are requested to set an unsupported
| 1688 | * feature. Clearing unsupported features is OK.
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1689 | */ |
| 1690 | mask_set = canon_pgprot(mask_set); |
Dave Hansen | 39114b7 | 2018-04-06 13:55:17 -0700 | [diff] [blame] | 1691 | |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1692 | if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split) |
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1693 | return 0; |
| 1694 | |
Thomas Gleixner | 69b1415 | 2008-02-13 11:04:50 +0100 | [diff] [blame] | 1695 | /* Ensure we are PAGE_SIZE aligned */ |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1696 | if (in_flag & CPA_ARRAY) { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1697 | int i; |
| 1698 | for (i = 0; i < numpages; i++) { |
| 1699 | if (addr[i] & ~PAGE_MASK) { |
| 1700 | addr[i] &= PAGE_MASK; |
| 1701 | WARN_ON_ONCE(1); |
| 1702 | } |
| 1703 | } |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1704 | } else if (!(in_flag & CPA_PAGES_ARRAY)) { |
| 1705 | /* |
| 1706 | * An in_flag of CPA_PAGES_ARRAY implies the addresses are aligned.
| 1707 | * No need to check in that case.
| 1708 | */ |
| 1709 | if (*addr & ~PAGE_MASK) { |
| 1710 | *addr &= PAGE_MASK; |
| 1711 | /* |
| 1712 | * People should not be passing in unaligned addresses: |
| 1713 | */ |
| 1714 | WARN_ON_ONCE(1); |
| 1715 | } |
Jack Steiner | fa526d0 | 2009-09-03 12:56:02 -0500 | [diff] [blame] | 1716 | /* |
| 1717 | * Save address for cache flush. *addr is modified in the call |
| 1718 | * to __change_page_attr_set_clr() below. |
| 1719 | */ |
LuckTony | c748610 | 2018-08-31 09:55:06 -0700 | [diff] [blame] | 1720 | baddr = make_addr_canonical_again(*addr); |
Thomas Gleixner | 69b1415 | 2008-02-13 11:04:50 +0100 | [diff] [blame] | 1721 | } |
| 1722 | |
Nick Piggin | 5843d9a | 2008-08-01 03:15:21 +0200 | [diff] [blame] | 1723 | /* Must avoid aliasing mappings in the highmem code */ |
| 1724 | kmap_flush_unused(); |
| 1725 | |
Nick Piggin | db64fe0 | 2008-10-18 20:27:03 -0700 | [diff] [blame] | 1726 | vm_unmap_aliases(); |
| 1727 | |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1728 | cpa.vaddr = addr; |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1729 | cpa.pages = pages; |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1730 | cpa.numpages = numpages; |
| 1731 | cpa.mask_set = mask_set; |
| 1732 | cpa.mask_clr = mask_clr; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1733 | cpa.flags = 0; |
| 1734 | cpa.curpage = 0; |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1735 | cpa.force_split = force_split; |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1736 | |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1737 | if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY)) |
| 1738 | cpa.flags |= in_flag; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1739 | |
Thomas Gleixner | af96e44 | 2008-02-15 21:49:46 +0100 | [diff] [blame] | 1740 | /* No alias checking for _NX bit modifications */ |
| 1741 | checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; |
Dave Hansen | c40a56a | 2018-08-02 15:58:31 -0700 | [diff] [blame] | 1742 | /* Has caller explicitly disabled alias checking? */ |
| 1743 | if (in_flag & CPA_NO_CHECK_ALIAS) |
| 1744 | checkalias = 0; |
Thomas Gleixner | af96e44 | 2008-02-15 21:49:46 +0100 | [diff] [blame] | 1745 | |
| 1746 | ret = __change_page_attr_set_clr(&cpa, checkalias); |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1747 | |
Thomas Gleixner | 57a6a46 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1748 | /* |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1749 | * Check whether we really changed something: |
| 1750 | */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1751 | if (!(cpa.flags & CPA_FLUSHTLB)) |
Shaohua Li | 1ac2f7d | 2008-08-04 14:51:24 +0800 | [diff] [blame] | 1752 | goto out; |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 1753 | |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1754 | /* |
Andi Kleen | 6bb8383 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1755 | * No need to flush when we did not set any of the caching
| 1756 | * attributes: |
| 1757 | */ |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1758 | cache = !!pgprot2cachemode(mask_set); |
Andi Kleen | 6bb8383 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1759 | |
| 1760 | /* |
Peter Zijlstra | fce2ce9 | 2018-09-19 10:50:22 +0200 | [diff] [blame] | 1761 | * On error, flush everything to be sure.
Thomas Gleixner | 57a6a46 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1762 | */ |
Peter Zijlstra | fce2ce9 | 2018-09-19 10:50:22 +0200 | [diff] [blame] | 1763 | if (ret) { |
Andi Kleen | 6bb8383 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1764 | cpa_flush_all(cache); |
Peter Zijlstra | fce2ce9 | 2018-09-19 10:50:22 +0200 | [diff] [blame] | 1765 | goto out; |
| 1766 | } |
| 1767 | |
| 1768 | if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { |
| 1769 | cpa_flush_array(baddr, addr, numpages, cache, |
| 1770 | cpa.flags, pages); |
| 1771 | } else { |
| 1772 | cpa_flush_range(baddr, numpages, cache); |
| 1773 | } |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 1774 | |
Thomas Gleixner | 76ebd05 | 2008-02-09 23:24:09 +0100 | [diff] [blame] | 1775 | out: |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1776 | return ret; |
| 1777 | } |
| 1778 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1779 | static inline int change_page_attr_set(unsigned long *addr, int numpages, |
| 1780 | pgprot_t mask, int array) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1781 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1782 | return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1783 | (array ? CPA_ARRAY : 0), NULL); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1784 | } |
| 1785 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1786 | static inline int change_page_attr_clear(unsigned long *addr, int numpages, |
| 1787 | pgprot_t mask, int array) |
Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1788 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1789 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1790 | (array ? CPA_ARRAY : 0), NULL); |
Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1791 | } |
| 1792 | |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1793 | static inline int cpa_set_pages_array(struct page **pages, int numpages, |
| 1794 | pgprot_t mask) |
| 1795 | { |
| 1796 | return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0, |
| 1797 | CPA_PAGES_ARRAY, pages); |
| 1798 | } |
| 1799 | |
| 1800 | static inline int cpa_clear_pages_array(struct page **pages, int numpages, |
| 1801 | pgprot_t mask) |
| 1802 | { |
| 1803 | return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0, |
| 1804 | CPA_PAGES_ARRAY, pages); |
| 1805 | } |
| 1806 | |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1807 | int _set_memory_uc(unsigned long addr, int numpages) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1808 | { |
Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 1809 | /* |
| 1810 | * for now UC MINUS. see comments in ioremap_nocache() |
Luis R. Rodriguez | e4b6be33 | 2015-05-11 10:15:53 +0200 | [diff] [blame] | 1811 | * If you really need strong UC use ioremap_uc(), but note |
| 1812 | * that you cannot override IO areas with set_memory_*() as |
| 1813 | * these helpers cannot work with IO memory. |
Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 1814 | */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1815 | return change_page_attr_set(&addr, numpages, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1816 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), |
| 1817 | 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1818 | } |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1819 | |
| 1820 | int set_memory_uc(unsigned long addr, int numpages) |
| 1821 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1822 | int ret; |
| 1823 | |
Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 1824 | /* |
| 1825 | * For now UC MINUS; see comments in ioremap_nocache().
| 1826 | */ |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1827 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 1828 | _PAGE_CACHE_MODE_UC_MINUS, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1829 | if (ret) |
| 1830 | goto out_err; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1831 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1832 | ret = _set_memory_uc(addr, numpages); |
| 1833 | if (ret) |
| 1834 | goto out_free; |
| 1835 | |
| 1836 | return 0; |
| 1837 | |
| 1838 | out_free: |
| 1839 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
| 1840 | out_err: |
| 1841 | return ret; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1842 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1843 | EXPORT_SYMBOL(set_memory_uc); |
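| | /*
| | * A minimal usage sketch (illustrative; error handling elided): a
| | * driver needing an uncached view of a kernel page, restored to
| | * write-back afterwards, might do:
| | *
| | *	unsigned long vaddr = (unsigned long)page_address(page);
| | *
| | *	set_memory_uc(vaddr, 1);
| | *	... access the buffer uncached ...
| | *	set_memory_wb(vaddr, 1);
| | */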
| 1844 | |
H Hartley Sweeten | 2d070ef | 2011-11-15 14:49:00 -0800 | [diff] [blame] | 1845 | static int _set_memory_array(unsigned long *addr, int addrinarray, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1846 | enum page_cache_mode new_type) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1847 | { |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1848 | enum page_cache_mode set_type; |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1849 | int i, j; |
| 1850 | int ret; |
| 1851 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1852 | for (i = 0; i < addrinarray; i++) { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1853 | ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1854 | new_type, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1855 | if (ret) |
| 1856 | goto out_free; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1857 | } |
| 1858 | |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1859 | /* If WC, set to UC- first and then WC */ |
| 1860 | set_type = (new_type == _PAGE_CACHE_MODE_WC) ? |
| 1861 | _PAGE_CACHE_MODE_UC_MINUS : new_type; |
| 1862 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1863 | ret = change_page_attr_set(addr, addrinarray, |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1864 | cachemode2pgprot(set_type), 1); |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1865 | |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1866 | if (!ret && new_type == _PAGE_CACHE_MODE_WC) |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1867 | ret = change_page_attr_set_clr(addr, addrinarray, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1868 | cachemode2pgprot( |
| 1869 | _PAGE_CACHE_MODE_WC), |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1870 | __pgprot(_PAGE_CACHE_MASK), |
| 1871 | 0, CPA_ARRAY, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1872 | if (ret) |
| 1873 | goto out_free; |
Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 1874 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1875 | return 0; |
| 1876 | |
| 1877 | out_free: |
| 1878 | for (j = 0; j < i; j++) |
| 1879 | free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE); |
| 1880 | |
| 1881 | return ret; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1882 | } |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1883 | |
| 1884 | int set_memory_array_uc(unsigned long *addr, int addrinarray) |
| 1885 | { |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1886 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1887 | } |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1888 | EXPORT_SYMBOL(set_memory_array_uc); |
| 1889 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1890 | int set_memory_array_wc(unsigned long *addr, int addrinarray) |
| 1891 | { |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1892 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC); |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1893 | } |
| 1894 | EXPORT_SYMBOL(set_memory_array_wc); |
| 1895 | |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1896 | int set_memory_array_wt(unsigned long *addr, int addrinarray) |
| 1897 | { |
| 1898 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT); |
| 1899 | } |
| 1900 | EXPORT_SYMBOL_GPL(set_memory_array_wt); |

int _set_memory_wc(unsigned long addr, int numpages)
{
	int ret;
	unsigned long addr_copy = addr;

	ret = change_page_attr_set(&addr, numpages,
				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
				   0);
	if (!ret) {
		ret = change_page_attr_set_clr(&addr_copy, numpages,
					       cachemode2pgprot(
						_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, 0, NULL);
	}
	return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
	int ret;

	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			      _PAGE_CACHE_MODE_WC, NULL);
	if (ret)
		return ret;

	ret = _set_memory_wc(addr, numpages);
	if (ret)
		free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL(set_memory_wc);
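
/*
 * Usage sketch (hypothetical, not part of this file): a driver
 * allocating a page-aligned scratch buffer from the direct mapping and
 * making it write-combined. The example_* helpers are invented for
 * illustration; the allocation and set_memory_*() calls are real APIs.
 */
static void *example_alloc_wc(int numpages)
{
	unsigned int order = get_order(numpages * PAGE_SIZE);
	unsigned long addr = __get_free_pages(GFP_KERNEL, order);

	if (!addr)
		return NULL;

	if (set_memory_wc(addr, numpages)) {
		free_pages(addr, order);
		return NULL;
	}
	return (void *)addr;
}

static void example_free_wc(void *buf, int numpages)
{
	/* The range must go back to WB before the pages are freed. */
	set_memory_wb((unsigned long)buf, numpages);
	free_pages((unsigned long)buf, get_order(numpages * PAGE_SIZE));
}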

int _set_memory_wt(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
				    cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
}

int set_memory_wt(unsigned long addr, int numpages)
{
	int ret;

	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			      _PAGE_CACHE_MODE_WT, NULL);
	if (ret)
		return ret;

	ret = _set_memory_wt(addr, numpages);
	if (ret)
		free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL_GPL(set_memory_wt);
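
/*
 * Usage sketch (hypothetical, not part of this file): WT keeps reads
 * cacheable while forcing writes through to memory, which is what e.g.
 * persistent-memory users want. The call pattern mirrors
 * set_memory_wc(); example_make_wt() is an invented name.
 */
static int example_make_wt(unsigned long addr, int numpages)
{
	int ret = set_memory_wt(addr, numpages);

	/* Teardown would be set_memory_wb(addr, numpages); */
	return ret;
}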
| 1959 | |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1960 | int _set_memory_wb(unsigned long addr, int numpages) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1961 | { |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1962 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1963 | return change_page_attr_clear(&addr, numpages, |
| 1964 | __pgprot(_PAGE_CACHE_MASK), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1965 | } |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1966 | |
| 1967 | int set_memory_wb(unsigned long addr, int numpages) |
| 1968 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1969 | int ret; |
| 1970 | |
| 1971 | ret = _set_memory_wb(addr, numpages); |
| 1972 | if (ret) |
| 1973 | return ret; |
| 1974 | |
venkatesh.pallipadi@intel.com | c15238d | 2008-08-20 16:45:51 -0700 | [diff] [blame] | 1975 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1976 | return 0; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1977 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1978 | EXPORT_SYMBOL(set_memory_wb); |

int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
	int i;
	int ret;

	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	ret = change_page_attr_clear(addr, addrinarray,
				     __pgprot(_PAGE_CACHE_MASK), 1);
	if (ret)
		return ret;

	for (i = 0; i < addrinarray; i++)
		free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);

	return 0;
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	if (!(__supported_pte_mask & _PAGE_NX))
		return 0;

	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	if (!(__supported_pte_mask & _PAGE_NX))
		return 0;

	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
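
/*
 * Usage sketch (hypothetical, not part of this file): enforcing W^X on
 * a direct-mapped page that holds generated code. Make the page
 * read-only first, then executable; set_memory_rw()/set_memory_nx()
 * would be the reverse steps before patching it again.
 * example_seal_code_page() is an invented name.
 */
static int example_seal_code_page(unsigned long addr)
{
	int ret = set_memory_ro(addr, 1);

	if (ret)
		return ret;

	return set_memory_x(addr, 1);
}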

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_np_noalias(unsigned long addr, int numpages)
{
	int cpa_flags = CPA_NO_CHECK_ALIAS;

	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
					__pgprot(_PAGE_PRESENT), 0,
					cpa_flags, NULL);
}

int set_memory_4k(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
					__pgprot(0), 1, 0, NULL);
}

int set_memory_nonglobal(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_GLOBAL), 0);
}

int set_memory_global(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
				    __pgprot(_PAGE_GLOBAL), 0);
}

static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
	struct cpa_data cpa;
	unsigned long start;
	int ret;

	/* Nothing to do if memory encryption is not active */
	if (!mem_encrypt_active())
		return 0;

	/* Should not be working on unaligned addresses */
	if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
		addr &= PAGE_MASK;

	start = addr;

	memset(&cpa, 0, sizeof(cpa));
	cpa.vaddr = &addr;
	cpa.numpages = numpages;
	cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
	cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
	cpa.pgd = init_mm.pgd;

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();
	vm_unmap_aliases();

	/* Before changing the encryption attribute, we need to flush caches. */
	cpa_flush_range(start, numpages, 1);

	ret = __change_page_attr_set_clr(&cpa, 1);

	/*
	 * After changing the encryption attribute, we need to flush TLBs
	 * again in case any speculative TLB caching occurred (but no need
	 * to flush caches again). We could just use cpa_flush_all(), but
	 * in case TLB flushing gets optimized in the cpa_flush_range()
	 * path use the same logic as above.
	 */
	cpa_flush_range(start, numpages, 0);

	return ret;
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, true);
}
EXPORT_SYMBOL_GPL(set_memory_encrypted);

int set_memory_decrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, false);
}
EXPORT_SYMBOL_GPL(set_memory_decrypted);
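
/*
 * Usage sketch (hypothetical, not part of this file): under SME/SEV a
 * buffer shared with the hypervisor or an untrusted device needs the
 * C-bit cleared so both sides see the same unencrypted data, and it
 * must be re-encrypted before being handed back to the allocator. The
 * example_* names are invented for illustration.
 */
static int example_share_buffer(void *buf, int numpages)
{
	return set_memory_decrypted((unsigned long)buf, numpages);
}

static int example_unshare_buffer(void *buf, int numpages)
{
	return set_memory_encrypted((unsigned long)buf, numpages);
}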

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

static int _set_pages_array(struct page **pages, int addrinarray,
			    enum page_cache_mode new_type)
{
	unsigned long start;
	unsigned long end;
	enum page_cache_mode set_type;
	int i;
	int free_idx;
	int ret;

	for (i = 0; i < addrinarray; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		if (reserve_memtype(start, end, new_type, NULL))
			goto err_out;
	}

	/* If WC, set to UC- first and then WC */
	set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
				_PAGE_CACHE_MODE_UC_MINUS : new_type;

	ret = cpa_set_pages_array(pages, addrinarray,
				  cachemode2pgprot(set_type));
	if (!ret && new_type == _PAGE_CACHE_MODE_WC)
		ret = change_page_attr_set_clr(NULL, addrinarray,
					       cachemode2pgprot(
						_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, CPA_PAGES_ARRAY, pages);
	if (ret)
		goto err_out;
	return 0; /* Success */
err_out:
	free_idx = i;
	for (i = 0; i < free_idx; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		free_memtype(start, end);
	}
	return -EINVAL;
}

int set_pages_array_uc(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_array_wt(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT);
}
EXPORT_SYMBOL_GPL(set_pages_array_wt);
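
/*
 * Usage sketch (hypothetical, not part of this file): graphics drivers
 * typically flip an array of allocator pages to WC for CPU uploads and
 * back to WB before returning them to the page allocator. The
 * example_* names are invented; highmem pages are skipped internally.
 */
static int example_pages_to_wc(struct page **pages, int count)
{
	return set_pages_array_wc(pages, count);
}

static int example_pages_to_wb(struct page **pages, int count)
{
	return set_pages_array_wb(pages, count);
}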

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
	int retval;
	unsigned long start;
	unsigned long end;
	int i;

	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	retval = cpa_clear_pages_array(pages, addrinarray,
				       __pgprot(_PAGE_CACHE_MASK));
	if (retval)
		return retval;

	for (i = 0; i < addrinarray; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		free_memtype(start, end);
	}

	return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0),
				.flags = 0};

	/*
	 * No alias checking is needed for setting the present flag.
	 * Otherwise we may need to break large pages for 64-bit kernel
	 * text mappings (which adds complexity, especially if we want
	 * to do this from atomic context). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.flags = 0};

	/*
	 * No alias checking is needed for clearing the present flag.
	 * Otherwise we may need to break large pages for 64-bit kernel
	 * text mappings (which adds complexity, especially if we want
	 * to do this from atomic context). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages for identity mappings are not used at boot time
	 * and hence no memory allocations happen during large page splits.
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs, but that can
	 * deadlock here, so flush only the current CPU's TLB.
	 * Preemption needs to be disabled around __flush_tlb_all()
	 * because of the CR3 reload in __native_flush_tlb().
	 */
	preempt_disable();
	__flush_tlb_all();
	preempt_enable();

	arch_flush_lazy_mmu_mode();
}
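
/*
 * Conceptual sketch (hypothetical, not part of this file): with
 * CONFIG_DEBUG_PAGEALLOC enabled, the page allocator unmaps pages on
 * free and remaps them on allocation, so use-after-free accesses
 * fault. example_debug_free_unmap() is an invented name; the real
 * hook is kernel_map_pages() in the core mm code.
 */
static void example_debug_free_unmap(struct page *page, int order)
{
	/* enable == 0 clears _PAGE_PRESENT on the direct mapping. */
	__kernel_map_pages(page, 1 << order, 0);
}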

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
			    unsigned numpages, unsigned long page_flags)
{
	int retval = -EINVAL;

	struct cpa_data cpa = {
		.vaddr = &address,
		.pfn = pfn,
		.pgd = pgd,
		.numpages = numpages,
		.mask_set = __pgprot(0),
		.mask_clr = __pgprot(0),
		.flags = 0,
	};

	if (!(__supported_pte_mask & _PAGE_NX))
		goto out;

	if (!(page_flags & _PAGE_NX))
		cpa.mask_clr = __pgprot(_PAGE_NX);

	if (!(page_flags & _PAGE_RW))
		cpa.mask_clr = __pgprot(_PAGE_RW);

	if (!(page_flags & _PAGE_ENC))
		cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);

	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);

	retval = __change_page_attr_set_clr(&cpa, 0);
	__flush_tlb_all();

out:
	return retval;
}
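
/*
 * Usage sketch (hypothetical, not part of this file): EFI-style callers
 * map runtime regions into their own page tables with explicit flags;
 * per the logic above, protection bits absent from page_flags (e.g.
 * _PAGE_RW, _PAGE_NX) are cleared rather than set.
 * example_map_data_region() and efi_pgd are invented names here.
 */
static int example_map_data_region(pgd_t *efi_pgd, u64 pfn,
				   unsigned long va, unsigned int npages)
{
	/* Writable, non-executable mapping for a data region. */
	return kernel_map_pages_in_pgd(efi_pgd, pfn, va, npages,
				       _PAGE_RW | _PAGE_NX);
}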

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif