/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool __read_mostly boot_cpu_done;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_initialized;
static bool __read_mostly init_cm_done;

void pat_disable(const char *reason)
{
        if (pat_disabled)
                return;

        if (boot_cpu_done) {
                WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
                return;
        }

        pat_disabled = true;
        pr_info("x86/PAT: %s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
        return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
        pat_debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags arch_1 and uncached together to keep track of
 * the memory type of pages that have a backing struct page.
 *
 * X86 PAT supports 4 different memory types:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB                0
#define _PGMT_WC                (1UL << PG_arch_1)
#define _PGMT_UC_MINUS          (1UL << PG_uncached)
#define _PGMT_WT                (1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK              (1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK        (~_PGMT_MASK)
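
/*
 * Worked example of the encoding above: a page tracked as WT has both
 * PG_uncached and PG_arch_1 set, so (pg->flags & _PGMT_MASK) == _PGMT_WT.
 * A freshly allocated page has neither flag set and therefore reads back
 * as the default WB type.
 */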

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
        unsigned long pg_flags = pg->flags & _PGMT_MASK;

        if (pg_flags == _PGMT_WB)
                return _PAGE_CACHE_MODE_WB;
        else if (pg_flags == _PGMT_WC)
                return _PAGE_CACHE_MODE_WC;
        else if (pg_flags == _PGMT_UC_MINUS)
                return _PAGE_CACHE_MODE_UC_MINUS;
        else
                return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
                                    enum page_cache_mode memtype)
{
        unsigned long memtype_flags;
        unsigned long old_flags;
        unsigned long new_flags;

        switch (memtype) {
        case _PAGE_CACHE_MODE_WC:
                memtype_flags = _PGMT_WC;
                break;
        case _PAGE_CACHE_MODE_UC_MINUS:
                memtype_flags = _PGMT_UC_MINUS;
                break;
        case _PAGE_CACHE_MODE_WT:
                memtype_flags = _PGMT_WT;
                break;
        case _PAGE_CACHE_MODE_WB:
        default:
                memtype_flags = _PGMT_WB;
                break;
        }

        do {
                old_flags = pg->flags;
                new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
        } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
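
/*
 * Illustrative usage (sketch, assuming a valid struct page *pg):
 *
 *	set_page_memtype(pg, _PAGE_CACHE_MODE_WC);
 *	WARN_ON(get_page_memtype(pg) != _PAGE_CACHE_MODE_WC);
 *
 * The cmpxchg() loop above makes the flag update safe against concurrent
 * modifications of unrelated bits in pg->flags.
 */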
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
        return -1;
}
static inline void set_page_memtype(struct page *pg,
                                    enum page_cache_mode memtype)
{
}
#endif

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
        enum page_cache_mode cache;
        char *cache_mode;

        switch (pat_val) {
        case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
        case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
        case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
        case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
        case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
        case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
        default:           cache = CM(WB);       cache_mode = "WB  "; break;
        }

        memcpy(msg, cache_mode, 4);

        return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with the highest index.
 */
static void __init_cache_modes(u64 pat)
{
        enum page_cache_mode cache;
        char pat_msg[33];
        int i;

        pat_msg[32] = 0;
        for (i = 7; i >= 0; i--) {
                cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
                                           pat_msg + 4 * i);
                update_cache_mode_entry(i, cache);
        }
        pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);

        init_cm_done = true;
}

#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
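
/*
 * Example expansion: PAT(7, WT) evaluates to ((u64)PAT_WT << 56), i.e. it
 * places the WT encoding (4) in PAT table entry 7, bits 56-63 of the MSR
 * value.
 */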

static void pat_bsp_init(u64 pat)
{
        u64 tmp_pat;

        if (!boot_cpu_has(X86_FEATURE_PAT)) {
                pat_disable("PAT not supported by CPU.");
                return;
        }

        rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
        if (!tmp_pat) {
                pat_disable("PAT MSR is 0, disabled.");
                return;
        }

        wrmsrl(MSR_IA32_CR_PAT, pat);
        pat_initialized = true;

        __init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
        if (!boot_cpu_has(X86_FEATURE_PAT)) {
                /*
                 * If this happens we are on a secondary CPU, but switched to
                 * PAT on the boot CPU. We have no way to undo PAT.
                 */
                panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
        }

        wrmsrl(MSR_IA32_CR_PAT, pat);
}

void init_cache_modes(void)
{
        u64 pat = 0;

        if (init_cm_done)
                return;

        if (boot_cpu_has(X86_FEATURE_PAT)) {
                /*
                 * CPU supports PAT. Set PAT table to be consistent with
                 * PAT MSR. This case supports the "nopat" boot option, and
                 * virtual machine environments which support PAT without
                 * MTRRs. In particular, Xen has a unique setup for the
                 * PAT MSR.
                 *
                 * If the PAT MSR returns 0, it is considered invalid and is
                 * emulated as no PAT.
                 */
                rdmsrl(MSR_IA32_CR_PAT, pat);
        }

        if (!pat) {
                /*
                 * No PAT. Emulate the PAT table that corresponds to the two
                 * cache bits, PWT (Write Through) and PCD (Cache Disable).
                 * This setup is also the same as the BIOS default setup.
                 *
                 * PTE encoding:
                 *
                 *       PCD
                 *       |PWT  PAT
                 *       ||    slot
                 *       00    0    WB : _PAGE_CACHE_MODE_WB
                 *       01    1    WT : _PAGE_CACHE_MODE_WT
                 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
                 *       11    3    UC : _PAGE_CACHE_MODE_UC
                 *
                 * NOTE: When WC or WP is used, it is redirected to UC- per
                 * the default setup in __cachemode2pte_tbl[].
                 */
                pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
                      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
        }

        __init_cache_modes(pat);
}

/**
 * pat_init - Initialize PAT MSR and PAT table
 *
 * This function initializes PAT MSR and PAT table with an OS-defined value
 * to enable additional cache attributes, WC, WT and WP.
 *
 * This function must be called on all CPUs using the specific sequence of
 * operations defined in the Intel SDM. mtrr_rendezvous_handler() provides
 * this procedure for PAT.
 */
void pat_init(void)
{
        u64 pat;
        struct cpuinfo_x86 *c = &boot_cpu_data;

        if (pat_disabled)
                return;

        if ((c->x86_vendor == X86_VENDOR_INTEL) &&
            (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
             ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
                /*
                 * PAT support with the lower four entries. Intel Pentium 2,
                 * 3, M, and 4 are affected by PAT errata, which make the
                 * upper four entries unusable. To be on the safe side, we
                 * don't use those.
                 *
                 * PTE encoding:
                 *      PAT
                 *      |PCD
                 *      ||PWT  PAT
                 *      |||    slot
                 *      000    0    WB : _PAGE_CACHE_MODE_WB
                 *      001    1    WC : _PAGE_CACHE_MODE_WC
                 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
                 *      011    3    UC : _PAGE_CACHE_MODE_UC
                 *      PAT bit unused
                 *
                 * NOTE: When WT or WP is used, it is redirected to UC- per
                 * the default setup in __cachemode2pte_tbl[].
                 */
                pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
                      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
        } else {
                /*
                 * Full PAT support. We put WT in slot 7 to improve
                 * robustness in the presence of errata that might cause
                 * the high PAT bit to be ignored. This way, a buggy slot 7
                 * access will hit slot 3, and slot 3 is UC, so at worst
                 * we lose performance without causing a correctness issue.
                 * Pentium 4 erratum N46 is an example of such an erratum,
                 * although we try not to use PAT at all on affected CPUs.
                 *
                 * PTE encoding:
                 *      PAT
                 *      |PCD
                 *      ||PWT  PAT
                 *      |||    slot
                 *      000    0    WB : _PAGE_CACHE_MODE_WB
                 *      001    1    WC : _PAGE_CACHE_MODE_WC
                 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
                 *      011    3    UC : _PAGE_CACHE_MODE_UC
                 *      100    4    WB : Reserved
                 *      101    5    WP : _PAGE_CACHE_MODE_WP
                 *      110    6    UC-: Reserved
                 *      111    7    WT : _PAGE_CACHE_MODE_WT
                 *
                 * The reserved slots are unused, but mapped to their
                 * corresponding types in the presence of PAT errata.
                 */
                pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
                      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
        }

        if (!boot_cpu_done) {
                pat_bsp_init(pat);
                boot_cpu_done = true;
        } else {
                pat_ap_init(pat);
        }
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype accesses */

/*
 * Returns the intersection of the PAT memory type and the MTRR memory type,
 * expressed as a memory type that PAT understands. (PAT and MTRR types do
 * not use the same numeric values.) The intersection is based on the
 * "Effective Memory Type" tables in the IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
                                     enum page_cache_mode req_type)
{
        /*
         * Look for the MTRR hint to get the effective type in the case
         * where the PAT request is for WB.
         */
        if (req_type == _PAGE_CACHE_MODE_WB) {
                u8 mtrr_type, uniform;

                mtrr_type = mtrr_type_lookup(start, end, &uniform);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_MODE_UC_MINUS;

                return _PAGE_CACHE_MODE_WB;
        }

        return req_type;
}
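
/*
 * Example: a WB request over a range that an MTRR marks as anything other
 * than WRBACK comes back as UC_MINUS, so the effective type is never more
 * cacheable than the MTRR allows.
 */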

struct pagerange_state {
        unsigned long           cur_pfn;
        int                     ram;
        int                     not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
        struct pagerange_state *state = arg;

        state->not_ram  |= initial_pfn > state->cur_pfn;
        state->ram      |= total_nr_pages > 0;
        state->cur_pfn   = initial_pfn + total_nr_pages;

        return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
        int ret = 0;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct pagerange_state state = {start_pfn, 0, 0};

        /*
         * For legacy reasons, physical address ranges in the legacy ISA
         * region are tracked as non-RAM. This allows users of /dev/mem to
         * map portions of the legacy ISA region, even when some of those
         * portions are listed (or not even listed) with different e820
         * types (RAM/reserved/..).
         */
        if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
                start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

        if (start_pfn < end_pfn) {
                ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
                                &state, pagerange_is_ram_callback);
        }

        return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}
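
/*
 * Return value example: a range that starts in a non-RAM hole and extends
 * into RAM makes the callback above report both ram and not_ram, so the
 * walk stops early and this function returns -1 (mixed). An all-RAM range
 * returns 1, and a range with no RAM pages returns 0.
 */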

/*
 * For RAM pages, we use the page flags to mark the pages with the
 * appropriate type. The page flags are limited to four types, WB (default),
 * WC, WT and UC-. A WP request fails with -EINVAL, and UC gets redirected
 * to UC-. Setting a new memory type is only allowed for a page mapped with
 * the default WB type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
                                  enum page_cache_mode req_type,
                                  enum page_cache_mode *new_type)
{
        struct page *page;
        u64 pfn;

        if (req_type == _PAGE_CACHE_MODE_WP) {
                if (new_type)
                        *new_type = _PAGE_CACHE_MODE_UC_MINUS;
                return -EINVAL;
        }

        if (req_type == _PAGE_CACHE_MODE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
                req_type = _PAGE_CACHE_MODE_UC_MINUS;
        }

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                enum page_cache_mode type;

                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != _PAGE_CACHE_MODE_WB) {
                        pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;

                        return -EBUSY;
                }
        }

        if (new_type)
                *new_type = req_type;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, req_type);
        }
        return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, _PAGE_CACHE_MODE_WB);
        }
        return 0;
}

static u64 sanitize_phys(u64 address)
{
        /*
         * When changing the memtype for pages containing poison allow
         * for a "decoy" virtual address (bit 63 clear) passed to
         * set_memory_X(). __pa() on a "decoy" address results in a
         * physical address with bit 63 set.
         *
         * Decoy addresses are not present for 32-bit builds, see
         * set_mce_nospec().
         */
        if (IS_ENABLED(CONFIG_X86_64))
                return address & __PHYSICAL_MASK;
        return address;
}
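
/*
 * Example (illustrative, assuming the usual 52-bit __PHYSICAL_MASK on
 * x86-64): sanitize_phys(0x8000000012345000ULL) yields 0x12345000, i.e.
 * the decoy bit 63 is cleared, while a 32-bit build returns the address
 * unchanged.
 */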

/*
 * req_type is typically one of:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in new_type in case of no error. In case
 * of any error it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
                    enum page_cache_mode *new_type)
{
        struct memtype *new;
        enum page_cache_mode actual_type;
        int is_range_ram;
        int err = 0;

        start = sanitize_phys(start);
        end = sanitize_phys(end);
        if (start >= end) {
                WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
                     start, end - 1, cattr_name(req_type));
                return -EINVAL;
        }

        if (!pat_enabled()) {
                /* This is identical to page table setting without PAT */
                if (new_type)
                        *new_type = req_type;
                return 0;
        }

        /*
         * The low ISA region is always mapped WB in the page table.
         * No need to track it.
         */
        if (x86_platform.is_untracked_pat_range(start, end)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_MODE_WB;
                return 0;
        }

        /*
         * Call mtrr_lookup to get the type hint. This is an
         * optimization for /dev/mem mmap'ers into WB memory (BIOS
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
        actual_type = pat_x_mtrr_type(start, end, req_type);

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                err = reserve_ram_pages_type(start, end, req_type, new_type);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start      = start;
        new->end        = end;
        new->type       = actual_type;

        spin_lock(&memtype_lock);

        err = rbt_memtype_check_insert(new, new_type);
        if (err) {
                pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
                        start, end - 1,
                        cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
                start, end - 1, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

int free_memtype(u64 start, u64 end)
{
        int err = -EINVAL;
        int is_range_ram;
        struct memtype *entry;

        if (!pat_enabled())
                return 0;

        start = sanitize_phys(start);
        end = sanitize_phys(end);

        /* The low ISA region is always mapped WB. No need to track it. */
        if (x86_platform.is_untracked_pat_range(start, end))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                err = free_ram_pages_type(start, end);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        spin_lock(&memtype_lock);
        entry = rbt_memtype_erase(start, end);
        spin_unlock(&memtype_lock);

        if (IS_ERR(entry)) {
                pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid, start, end - 1);
                return -EINVAL;
        }

        kfree(entry);

        dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

        return 0;
}
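
/*
 * Illustrative usage (sketch): pairing reserve_memtype() with
 * free_memtype() for a WC request on a non-RAM range. 'base' and 'size'
 * are placeholders:
 *
 *	enum page_cache_mode got;
 *
 *	if (!reserve_memtype(base, base + size, _PAGE_CACHE_MODE_WC, &got)) {
 *		... map and use the range with the returned type 'got' ...
 *		free_memtype(base, base + size);
 *	}
 */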

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
        enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
        struct memtype *entry;

        if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
                return rettype;

        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;

                page = pfn_to_page(paddr >> PAGE_SHIFT);
                return get_page_memtype(page);
        }

        spin_lock(&memtype_lock);

        entry = rbt_memtype_lookup(paddr);
        if (entry != NULL)
                rettype = entry->type;
        else
                rettype = _PAGE_CACHE_MODE_UC_MINUS;

        spin_unlock(&memtype_lock);
        return rettype;
}

/**
 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
 * of @pfn cannot be overridden by UC MTRR memory type.
 *
 * Only to be called when PAT is enabled.
 *
 * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC.
 * Returns false in other cases.
 */
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
{
        enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

        return cm == _PAGE_CACHE_MODE_UC ||
               cm == _PAGE_CACHE_MODE_UC_MINUS ||
               cm == _PAGE_CACHE_MODE_WC;
}
EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with the requested type. On success, the
 * requested type or another compatible type that was available for the
 * region is returned.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
                        enum page_cache_mode *type)
{
        resource_size_t size = end - start;
        enum page_cache_mode req_type = *type;
        enum page_cache_mode new_type;
        int ret;

        WARN_ON_ONCE(iomem_map_sanity_check(start, size));

        ret = reserve_memtype(start, end, req_type, &new_type);
        if (ret)
                goto out_err;

        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;

        if (kernel_map_sync_memtype(start, size, new_type) < 0)
                goto out_free;

        *type = new_type;
        return 0;

out_free:
        free_memtype(start, end);
        ret = -EBUSY;
out_err:
        return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
        free_memtype(start, end);
}

int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
        enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

        return io_reserve_memtype(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
        io_free_memtype(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);
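
/*
 * Illustrative usage (sketch): a driver requesting WC for an aperture and
 * releasing it on teardown. 'bar_start' and 'bar_size' are placeholders
 * for a device resource:
 *
 *	if (!arch_io_reserve_memtype_wc(bar_start, bar_size)) {
 *		... ioremap_wc() and use the aperture ...
 *		arch_io_free_memtype_wc(bar_start, bar_size);
 *	}
 */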

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
                vma_prot = pgprot_decrypted(vma_prot);

        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled())
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
{
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_DSYNC)
                pcm = _PAGE_CACHE_MODE_UC_MINUS;

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             cachemode2protval(pcm));
        return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
                            enum page_cache_mode pcm)
{
        unsigned long id_sz;

        if (base > __pa(high_memory-1))
                return 0;

        /*
         * Some areas in the middle of the kernel identity range
         * are not mapped, like the PCI space.
         */
        if (!page_is_ram(base >> PAGE_SHIFT))
                return 0;

        id_sz = (__pa(high_memory-1) <= base + size) ?
                                __pa(high_memory) - base :
                                size;

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
                pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
                        cattr_name(pcm),
                        base, (unsigned long long)(base + size-1));
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * this function also keeps the identity mapping (if any) in sync with
 * the new prot.
 */
venkatesh.pallipadi@intel.com | cdecff6 | 2009-01-09 16:13:12 -0800 | [diff] [blame] | 878 | static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, |
| 879 | int strict_prot) |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 880 | { |
| 881 | int is_ram = 0; |
Venkatesh Pallipadi | 7880f74 | 2009-02-24 17:35:13 -0800 | [diff] [blame] | 882 | int ret; |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 883 | enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot); |
| 884 | enum page_cache_mode pcm = want_pcm; |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 885 | |
Suresh Siddha | be03d9e | 2009-02-11 11:20:23 -0800 | [diff] [blame] | 886 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 887 | |
Suresh Siddha | be03d9e | 2009-02-11 11:20:23 -0800 | [diff] [blame] | 888 | /* |
Venkatesh Pallipadi | d886c73 | 2009-07-10 09:57:41 -0700 | [diff] [blame] | 889 | * reserve_pfn_range() for RAM pages. We do not refcount to keep |
| 890 | * track of number of mappings of RAM pages. We can assert that |
| 891 | * the type requested matches the type of first page in the range. |
Suresh Siddha | be03d9e | 2009-02-11 11:20:23 -0800 | [diff] [blame] | 892 | */ |
Venkatesh Pallipadi | d886c73 | 2009-07-10 09:57:41 -0700 | [diff] [blame] | 893 | if (is_ram) { |
Luis R. Rodriguez | cb32edf | 2015-05-26 10:28:15 +0200 | [diff] [blame] | 894 | if (!pat_enabled()) |
Venkatesh Pallipadi | d886c73 | 2009-07-10 09:57:41 -0700 | [diff] [blame] | 895 | return 0; |
| 896 | |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 897 | pcm = lookup_memtype(paddr); |
| 898 | if (want_pcm != pcm) { |
Luis R. Rodriguez | 9e76561 | 2015-05-26 10:28:11 +0200 | [diff] [blame] | 899 | pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", |
Venkatesh Pallipadi | d886c73 | 2009-07-10 09:57:41 -0700 | [diff] [blame] | 900 | current->comm, current->pid, |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 901 | cattr_name(want_pcm), |
Venkatesh Pallipadi | d886c73 | 2009-07-10 09:57:41 -0700 | [diff] [blame] | 902 | (unsigned long long)paddr, |
Bjorn Helgaas | 365811d | 2012-05-29 15:06:29 -0700 | [diff] [blame] | 903 | (unsigned long long)(paddr + size - 1), |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 904 | cattr_name(pcm)); |
Venkatesh Pallipadi | d886c73 | 2009-07-10 09:57:41 -0700 | [diff] [blame] | 905 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 906 | (~_PAGE_CACHE_MASK)) | |
| 907 | cachemode2protval(pcm)); |
Venkatesh Pallipadi | d886c73 | 2009-07-10 09:57:41 -0700 | [diff] [blame] | 908 | } |
Pallipadi, Venkatesh | 4bb9c5c | 2009-03-12 17:45:27 -0700 | [diff] [blame] | 909 | return 0; |
Venkatesh Pallipadi | d886c73 | 2009-07-10 09:57:41 -0700 | [diff] [blame] | 910 | } |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 911 | |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 912 | ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm); |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 913 | if (ret) |
| 914 | return ret; |
| 915 | |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 916 | if (pcm != want_pcm) { |
Suresh Siddha | 1adcaaf | 2009-08-17 13:23:50 -0700 | [diff] [blame] | 917 | if (strict_prot || |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 918 | !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { |
venkatesh.pallipadi@intel.com | cdecff6 | 2009-01-09 16:13:12 -0800 | [diff] [blame] | 919 | free_memtype(paddr, paddr + size); |
Luis R. Rodriguez | 9e76561 | 2015-05-26 10:28:11 +0200 | [diff] [blame] | 920 | pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n", |
| 921 | current->comm, current->pid, |
| 922 | cattr_name(want_pcm), |
| 923 | (unsigned long long)paddr, |
| 924 | (unsigned long long)(paddr + size - 1), |
| 925 | cattr_name(pcm)); |
venkatesh.pallipadi@intel.com | cdecff6 | 2009-01-09 16:13:12 -0800 | [diff] [blame] | 926 | return -EINVAL; |
| 927 | } |
| 928 | /* |
| 929 | * In the non-strict case we allow returning a type different from |
| 930 | * the one requested. |
| 931 | */ |
| 932 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & |
| 933 | (~_PAGE_CACHE_MASK)) | |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 934 | cachemode2protval(pcm)); |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 935 | } |
| 936 | |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 937 | if (kernel_map_sync_memtype(paddr, size, pcm) < 0) { |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 938 | free_memtype(paddr, paddr + size); |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 939 | return -EINVAL; |
| 940 | } |
| 941 | return 0; |
| 942 | } |
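/*
 * Minimal sketch of the non-strict path (editorial; this helper is
 * hypothetical, not kernel API): request write-combining but accept
 * whatever type reserve_memtype() actually grants.
 */
static int example_reserve_wc(u64 paddr, unsigned long size,
			      pgprot_t *prot)
{
	*prot = pgprot_writecombine(*prot);
	/*
	 * strict_prot == 0: if an overlapping reservation forces a
	 * different type (e.g. UC-), reserve_pfn_range() rewrites *prot
	 * with the granted type when is_new_memtype_allowed() permits
	 * the change, instead of failing outright.
	 */
	return reserve_pfn_range(paddr, size, prot, 0);
}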
| 943 | |
| 944 | /* |
| 945 | * Internal interface to free a range of physical memory. |
| 946 | * Frees non RAM regions only. |
| 947 | */ |
| 948 | static void free_pfn_range(u64 paddr, unsigned long size) |
| 949 | { |
| 950 | int is_ram; |
| 951 | |
Suresh Siddha | be03d9e | 2009-02-11 11:20:23 -0800 | [diff] [blame] | 952 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 953 | if (is_ram == 0) |
| 954 | free_memtype(paddr, paddr + size); |
| 955 | } |
| 956 | |
| 957 | /* |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 958 | * track_pfn_copy is called when a vma covering the pfnmap gets |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 959 | * copied through copy_page_range(). |
| 960 | * |
| 961 | * If the vma has a linear pfn mapping for the entire range, we get the prot |
| 962 | * from the pte and reserve the entire vma range with a single reserve_pfn_range() call. |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 963 | */ |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 964 | int track_pfn_copy(struct vm_area_struct *vma) |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 965 | { |
H. Peter Anvin | c1c15b6 | 2008-12-23 10:10:40 -0800 | [diff] [blame] | 966 | resource_size_t paddr; |
venkatesh.pallipadi@intel.com | 982d789 | 2008-12-19 13:47:28 -0800 | [diff] [blame] | 967 | unsigned long prot; |
Pallipadi, Venkatesh | 4b06504 | 2009-04-08 15:37:16 -0700 | [diff] [blame] | 968 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
venkatesh.pallipadi@intel.com | cdecff6 | 2009-01-09 16:13:12 -0800 | [diff] [blame] | 969 | pgprot_t pgprot; |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 970 | |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 971 | if (vma->vm_flags & VM_PAT) { |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 972 | /* |
venkatesh.pallipadi@intel.com | 982d789 | 2008-12-19 13:47:28 -0800 | [diff] [blame] | 973 | * Reserve the whole chunk covered by the vma. We need the |
| 974 | * starting address and protection from the pte. |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 975 | */ |
Pallipadi, Venkatesh | 4b06504 | 2009-04-08 15:37:16 -0700 | [diff] [blame] | 976 | if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 977 | WARN_ON_ONCE(1); |
venkatesh.pallipadi@intel.com | 982d789 | 2008-12-19 13:47:28 -0800 | [diff] [blame] | 978 | return -EINVAL; |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 979 | } |
venkatesh.pallipadi@intel.com | cdecff6 | 2009-01-09 16:13:12 -0800 | [diff] [blame] | 980 | pgprot = __pgprot(prot); |
| 981 | return reserve_pfn_range(paddr, vma_size, &pgprot, 1); |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 982 | } |
| 983 | |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 984 | return 0; |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 985 | } |
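/*
 * Rough sketch of the fork-time call site (editorial; simplified from
 * mm/memory.c:copy_page_range(); the hook name here is made up):
 */
static inline int example_copy_hook(struct vm_area_struct *vma)
{
	/* Only pfnmap/mixedmap vmas need their memtype re-reserved. */
	if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
		return track_pfn_copy(vma);
	return 0;
}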
| 986 | |
| 987 | /* |
Dan Williams | 9049771 | 2016-09-07 08:51:21 -0700 | [diff] [blame] | 988 | * prot is passed in as a parameter for the new mapping. If the vma has |
| 989 | * a linear pfn mapping for the entire range, or no vma is provided, |
| 990 | * reserve the entire pfn + size range with a single |
| 991 | * reserve_pfn_range() call. |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 992 | */ |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 993 | int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 994 | unsigned long pfn, unsigned long addr, unsigned long size) |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 995 | { |
Suresh Siddha | b1a86e1 | 2012-10-08 16:28:23 -0700 | [diff] [blame] | 996 | resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; |
Juergen Gross | 2a37469 | 2014-11-03 14:01:55 +0100 | [diff] [blame] | 997 | enum page_cache_mode pcm; |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 998 | |
Suresh Siddha | b1a86e1 | 2012-10-08 16:28:23 -0700 | [diff] [blame] | 999 | /* reserve the whole chunk starting from paddr */ |
Dan Williams | 9049771 | 2016-09-07 08:51:21 -0700 | [diff] [blame] | 1000 | if (!vma || (addr == vma->vm_start |
| 1001 | && size == (vma->vm_end - vma->vm_start))) { |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 1002 | int ret; |
| 1003 | |
| 1004 | ret = reserve_pfn_range(paddr, size, prot, 0); |
Dan Williams | 9049771 | 2016-09-07 08:51:21 -0700 | [diff] [blame] | 1005 | if (ret == 0 && vma) |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 1006 | vma->vm_flags |= VM_PAT; |
| 1007 | return ret; |
| 1008 | } |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1009 | |
Luis R. Rodriguez | cb32edf | 2015-05-26 10:28:15 +0200 | [diff] [blame] | 1010 | if (!pat_enabled()) |
Venkatesh Pallipadi | 10876376 | 2009-07-10 09:57:40 -0700 | [diff] [blame] | 1011 | return 0; |
| 1012 | |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1013 | /* |
| 1014 | * For anything smaller than the vma size, we set prot based on a |
| 1015 | * per-page memtype lookup. |
| 1016 | */ |
Juergen Gross | 2a37469 | 2014-11-03 14:01:55 +0100 | [diff] [blame] | 1017 | pcm = lookup_memtype(paddr); |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1018 | |
| 1019 | /* Check memtype for the remaining pages */ |
| 1020 | while (size > PAGE_SIZE) { |
| 1021 | size -= PAGE_SIZE; |
| 1022 | paddr += PAGE_SIZE; |
Juergen Gross | 2a37469 | 2014-11-03 14:01:55 +0100 | [diff] [blame] | 1023 | if (pcm != lookup_memtype(paddr)) |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1024 | return -EINVAL; |
| 1025 | } |
| 1026 | |
Matthew Wilcox | dd7b684 | 2016-01-25 12:25:15 -0500 | [diff] [blame] | 1027 | *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | |
Juergen Gross | 2a37469 | 2014-11-03 14:01:55 +0100 | [diff] [blame] | 1028 | cachemode2protval(pcm)); |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1029 | |
| 1030 | return 0; |
| 1031 | } |
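/*
 * Hedged example of the usual entry point: a driver ->mmap handler
 * whose remap_pfn_range() call reaches track_pfn_remap() and, for a
 * whole-vma mapping, reserve_pfn_range().  EXAMPLE_PHYS_BASE is a
 * made-up device address, not kernel API.
 */
#define EXAMPLE_PHYS_BASE	0xfd000000UL

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* remap_pfn_range() -> track_pfn_remap() -> reserve_pfn_range() */
	return remap_pfn_range(vma, vma->vm_start,
			       EXAMPLE_PHYS_BASE >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}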
| 1032 | |
Borislav Petkov | 308a047 | 2016-10-26 19:43:43 +0200 | [diff] [blame] | 1033 | void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1034 | { |
Juergen Gross | 2a37469 | 2014-11-03 14:01:55 +0100 | [diff] [blame] | 1035 | enum page_cache_mode pcm; |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1036 | |
Luis R. Rodriguez | cb32edf | 2015-05-26 10:28:15 +0200 | [diff] [blame] | 1037 | if (!pat_enabled()) |
Borislav Petkov | 308a047 | 2016-10-26 19:43:43 +0200 | [diff] [blame] | 1038 | return; |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1039 | |
| 1040 | /* Set prot based on lookup */ |
Dan Williams | f25748e3 | 2016-01-15 16:56:43 -0800 | [diff] [blame] | 1041 | pcm = lookup_memtype(pfn_t_to_phys(pfn)); |
Matthew Wilcox | dd7b684 | 2016-01-25 12:25:15 -0500 | [diff] [blame] | 1042 | *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | |
Juergen Gross | 2a37469 | 2014-11-03 14:01:55 +0100 | [diff] [blame] | 1043 | cachemode2protval(pcm)); |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1044 | } |
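/*
 * Hedged sketch of the per-pte path: a ->fault handler using
 * vmf_insert_pfn(), which reaches track_pfn_insert() to fold the
 * already-reserved memtype into the pgprot.  EXAMPLE_PHYS_BASE is the
 * made-up address from the sketch above.
 */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      EXAMPLE_PHYS_BASE >> PAGE_SHIFT);
}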
| 1045 | |
| 1046 | /* |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1047 | * untrack_pfn is called while unmapping a pfnmap for a region. |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1048 | * untrack can be called for a specific region indicated by pfn and size, or |
Suresh Siddha | b1a86e1 | 2012-10-08 16:28:23 -0700 | [diff] [blame] | 1049 | * for the entire vma (in which case pfn and size are both zero). |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1050 | */ |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1051 | void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, |
| 1052 | unsigned long size) |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1053 | { |
H. Peter Anvin | c1c15b6 | 2008-12-23 10:10:40 -0800 | [diff] [blame] | 1054 | resource_size_t paddr; |
Suresh Siddha | b1a86e1 | 2012-10-08 16:28:23 -0700 | [diff] [blame] | 1055 | unsigned long prot; |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1056 | |
Dan Williams | 9049771 | 2016-09-07 08:51:21 -0700 | [diff] [blame] | 1057 | if (vma && !(vma->vm_flags & VM_PAT)) |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1058 | return; |
Suresh Siddha | b1a86e1 | 2012-10-08 16:28:23 -0700 | [diff] [blame] | 1059 | |
| 1060 | /* free the chunk starting from pfn or the whole chunk */ |
| 1061 | paddr = (resource_size_t)pfn << PAGE_SHIFT; |
| 1062 | if (!paddr && !size) { |
| 1063 | if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { |
| 1064 | WARN_ON_ONCE(1); |
| 1065 | return; |
| 1066 | } |
| 1067 | |
| 1068 | size = vma->vm_end - vma->vm_start; |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1069 | } |
Suresh Siddha | b1a86e1 | 2012-10-08 16:28:23 -0700 | [diff] [blame] | 1070 | free_pfn_range(paddr, size); |
Dan Williams | 9049771 | 2016-09-07 08:51:21 -0700 | [diff] [blame] | 1071 | if (vma) |
| 1072 | vma->vm_flags &= ~VM_PAT; |
venkatesh.pallipadi@intel.com | 5899329 | 2008-12-18 11:41:30 -0800 | [diff] [blame] | 1073 | } |
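/*
 * Rough sketch of the teardown side (editorial; simplified from
 * mm/memory.c:unmap_single_vma(), where pfn == size == 0 means "look
 * up and free the whole vma's reservation"):
 *
 *	if (unlikely(vma->vm_flags & VM_PFNMAP))
 *		untrack_pfn(vma, 0, 0);
 */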
| 1074 | |
Toshi Kani | d9fe4fa | 2015-12-22 17:54:23 -0700 | [diff] [blame] | 1075 | /* |
| 1076 | * untrack_pfn_moved is called while mremapping a pfnmap to a new region, |
| 1077 | * with the old vma, after its pfnmap page table has been removed. The new |
| 1078 | * vma has a new pfnmap to the same pfn & cache type with VM_PAT set. |
| 1079 | */ |
| 1080 | void untrack_pfn_moved(struct vm_area_struct *vma) |
| 1081 | { |
| 1082 | vma->vm_flags &= ~VM_PAT; |
| 1083 | } |
| 1084 | |
venkatesh.pallipadi@intel.com | 2520bd3 | 2008-12-18 11:41:32 -0800 | [diff] [blame] | 1085 | pgprot_t pgprot_writecombine(pgprot_t prot) |
| 1086 | { |
Borislav Petkov | 7202fdb | 2015-06-04 18:55:11 +0200 | [diff] [blame] | 1087 | return __pgprot(pgprot_val(prot) | |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 1088 | cachemode2protval(_PAGE_CACHE_MODE_WC)); |
venkatesh.pallipadi@intel.com | 2520bd3 | 2008-12-18 11:41:32 -0800 | [diff] [blame] | 1089 | } |
Ingo Molnar | 92b9af9 | 2009-02-28 14:09:27 +0100 | [diff] [blame] | 1090 | EXPORT_SYMBOL_GPL(pgprot_writecombine); |
venkatesh.pallipadi@intel.com | 2520bd3 | 2008-12-18 11:41:32 -0800 | [diff] [blame] | 1091 | |
Toshi Kani | d1b4bfb | 2015-06-04 18:55:18 +0200 | [diff] [blame] | 1092 | pgprot_t pgprot_writethrough(pgprot_t prot) |
| 1093 | { |
| 1094 | return __pgprot(pgprot_val(prot) | |
| 1095 | cachemode2protval(_PAGE_CACHE_MODE_WT)); |
| 1096 | } |
| 1097 | EXPORT_SYMBOL_GPL(pgprot_writethrough); |
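/*
 * Usage note (illustrative): drivers normally apply these helpers to
 * vma->vm_page_prot before remapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_writethrough(vma->vm_page_prot);
 *
 * When PAT is disabled, cachemode2protval() degrades WC/WT to an
 * uncached encoding via the cachemode translation tables.
 */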
| 1098 | |
Andreas Herrmann | 012f09e | 2008-08-06 16:23:08 +0200 | [diff] [blame] | 1099 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1100 | |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1101 | static struct memtype *memtype_get_idx(loff_t pos) |
| 1102 | { |
venkatesh.pallipadi@intel.com | be5a0c1 | 2010-02-10 11:57:06 -0800 | [diff] [blame] | 1103 | struct memtype *print_entry; |
| 1104 | int ret; |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1105 | |
venkatesh.pallipadi@intel.com | be5a0c1 | 2010-02-10 11:57:06 -0800 | [diff] [blame] | 1106 | print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL); |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1107 | if (!print_entry) |
| 1108 | return NULL; |
| 1109 | |
| 1110 | spin_lock(&memtype_lock); |
Pallipadi, Venkatesh | 9e41a49 | 2010-02-10 15:26:07 -0800 | [diff] [blame] | 1111 | ret = rbt_memtype_copy_nth_element(print_entry, pos); |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1112 | spin_unlock(&memtype_lock); |
Ingo Molnar | ad2cde1 | 2008-09-30 13:20:45 +0200 | [diff] [blame] | 1113 | |
venkatesh.pallipadi@intel.com | be5a0c1 | 2010-02-10 11:57:06 -0800 | [diff] [blame] | 1114 | if (ret) { |
| 1115 | kfree(print_entry); |
| 1116 | return NULL; |
| 1117 | } |
| 1118 | |
| 1119 | return print_entry; |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1120 | } |
| 1121 | |
| 1122 | static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) |
| 1123 | { |
| 1124 | if (*pos == 0) { |
| 1125 | ++*pos; |
Rasmus Villemoes | 3736708 | 2014-11-28 22:03:41 +0100 | [diff] [blame] | 1126 | seq_puts(seq, "PAT memtype list:\n"); |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1127 | } |
| 1128 | |
| 1129 | return memtype_get_idx(*pos); |
| 1130 | } |
| 1131 | |
| 1132 | static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
| 1133 | { |
| 1134 | ++*pos; |
| 1135 | return memtype_get_idx(*pos); |
| 1136 | } |
| 1137 | |
| 1138 | static void memtype_seq_stop(struct seq_file *seq, void *v) |
| 1139 | { |
| 1140 | } |
| 1141 | |
| 1142 | static int memtype_seq_show(struct seq_file *seq, void *v) |
| 1143 | { |
| 1144 | struct memtype *print_entry = (struct memtype *)v; |
| 1145 | |
| 1146 | seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), |
| 1147 | print_entry->start, print_entry->end); |
| 1148 | kfree(print_entry); |
Ingo Molnar | ad2cde1 | 2008-09-30 13:20:45 +0200 | [diff] [blame] | 1149 | |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1150 | return 0; |
| 1151 | } |
| 1152 | |
Tobias Klauser | d535e43 | 2009-09-04 15:53:09 +0200 | [diff] [blame] | 1153 | static const struct seq_operations memtype_seq_ops = { |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1154 | .start = memtype_seq_start, |
| 1155 | .next = memtype_seq_next, |
| 1156 | .stop = memtype_seq_stop, |
| 1157 | .show = memtype_seq_show, |
| 1158 | }; |
| 1159 | |
| 1160 | static int memtype_seq_open(struct inode *inode, struct file *file) |
| 1161 | { |
| 1162 | return seq_open(file, &memtype_seq_ops); |
| 1163 | } |
| 1164 | |
| 1165 | static const struct file_operations memtype_fops = { |
| 1166 | .open = memtype_seq_open, |
| 1167 | .read = seq_read, |
| 1168 | .llseek = seq_lseek, |
| 1169 | .release = seq_release, |
| 1170 | }; |
| 1171 | |
| 1172 | static int __init pat_memtype_list_init(void) |
| 1173 | { |
Luis R. Rodriguez | cb32edf | 2015-05-26 10:28:15 +0200 | [diff] [blame] | 1174 | if (pat_enabled()) { |
Xiaotian Feng | dd4377b | 2009-11-26 19:53:48 +0800 | [diff] [blame] | 1175 | debugfs_create_file("pat_memtype_list", S_IRUSR, |
| 1176 | arch_debugfs_dir, NULL, &memtype_fops); |
| 1177 | } |
venkatesh.pallipadi@intel.com | fec0962 | 2008-07-18 16:08:14 -0700 | [diff] [blame] | 1178 | return 0; |
| 1179 | } |
| 1180 | |
| 1181 | late_initcall(pat_memtype_list_init); |
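/*
 * Usage (illustrative; the sample entries below are made up): with
 * CONFIG_X86_PAT and debugfs mounted, the active reservations can be
 * dumped from userspace:
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed04000
 *	write-combining @ 0xd0000000-0xd1000000
 *
 * Each line is produced by memtype_seq_show() above.
 */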
| 1182 | |
Andreas Herrmann | 012f09e | 2008-08-06 16:23:08 +0200 | [diff] [blame] | 1183 | #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */ |