/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36bits on most modern x86)
 */
#define DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/processor-flags.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>

#include "mtrr.h"

struct fixed_range_block {
	int base_msr;		/* start address of an MTRR block */
	int ranges;		/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}

/*
 * Get the size of contiguous MTRR range.
 *
 * The variable-range mask has a 1 in every address bit that must match,
 * so after ORing in size_or_mask (which covers the bits above the CPU's
 * physical address width), negating the mask yields the length of the
 * contiguous range.
 */
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask >>= PAGE_SHIFT;
	mask |= size_or_mask;
	size = -mask;
	size <<= PAGE_SHIFT;
	return size;
}

/*
 * Check and return the effective type for MTRR-MTRR type overlap.
 * Returns 1 if the effective type is UNCACHEABLE, else returns 0
 */
static int check_type_overlap(u8 *prev, u8 *curr)
{
	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
		*prev = MTRR_TYPE_WRTHROUGH;
		*curr = MTRR_TYPE_WRTHROUGH;
	}

	if (*prev != *curr) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	return 0;
}

/*
 * Error/Semi-error returns:
 * 0xFF - when MTRR is not enabled
 * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
 *		corresponds only to [start:*partial_end].
 *		Caller has to lookup again for [*partial_end:end].
 */
static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

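	/*
	 * The 88 entries in mtrr_state.fixed_ranges[] mirror the fixed MTRR
	 * MSRs: entries 0-7 cover 0x00000-0x7FFFF in 64K steps, entries 8-23
	 * cover 0x80000-0xBFFFF in 16K steps, and entries 24-87 cover
	 * 0xC0000-0xFFFFF in 4K steps, which is why the index below is
	 * derived with >> 16, >> 14 and >> 12 respectively.
	 */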
	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state.enabled & 2))
		return mtrr_state.def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

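		/* Bit 11 of MTRRphysMask is the valid (V) bit; skip disabled ranges. */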
		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));

		if (start_state != end_state) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into
			 * either
			 * (start:mtrr_end) (mtrr_end:end)
			 * or
			 * (start:mtrr_start) (mtrr_start:end)
			 * depending on kind of overlap.
			 * Return the type for first region and a pointer to
			 * the start of second region so that caller will
			 * lookup again on the second region.
			 * Note: This way we handle multiple overlaps as well.
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			if (unlikely(*partial_end <= start)) {
				WARN_ON(1);
				*partial_end = start + PAGE_SIZE;
			}

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
		}

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}

	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}

/*
 * Returns the effective MTRR type for the region
 * Error return:
 * 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	u8 type, prev_type;
	int repeat;
	u64 partial_end;

	type = __mtrr_type_lookup(start, end, &partial_end, &repeat);

	/*
	 * Common path is with repeat = 0.
	 * However, we can have cases where [start:end] spans across some
	 * MTRR range. Do repeated lookups for that case here.
	 */
	while (repeat) {
		prev_type = type;
		start = partial_end;
		type = __mtrr_type_lookup(start, end, &partial_end, &repeat);

		if (check_type_overlap(&prev_type, &type))
			return type;
	}

	return type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

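	/*
	 * frs is read as an array of u32 pairs, one (lo, hi) pair per fixed
	 * MTRR MSR: p[0..1] for the 64K MSR, p[2..5] for the two 16K MSRs
	 * and p[6..21] for the eight 4K MSRs.
	 */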
	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_debug(" %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}

static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}

static void prepare_set(void);
static void post_set(void);

static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			 mtrr_state.enabled & 1 ? "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & 2 ? "en" : "dis");
	if (size_or_mask & 0xffffffffUL)
		high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
	else
		high_width = ffs(size_or_mask>>32) + 32 - 1;
	high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;

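	/*
	 * high_width is the number of hex digits needed for the physical
	 * address bits above bit 31, so the format below prints base and
	 * mask as <high part><bits 31:12 in five digits>000.
	 */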
	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug(" %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned long flags;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Some BIOS's are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
	}
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *	differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;

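	/*
	 * Some BIOSes program a mask that does not reach up to the CPU's
	 * highest supported physical-address bit.  Treat everything above
	 * the topmost set bit as part of the mask, warn and taint, so that
	 * the size computed below stays correct.
	 */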
	/* Expand tmp with high bits to all 1s: */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND);
			mask_lo = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *	differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}


static unsigned long cr4;
static DEFINE_RAW_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts,
 * they would run extremely slow and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots
	 */

	raw_spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
	wbinvd();
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	raw_spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}

}

/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
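		/*
		 * base and size are in 4K pages here.  Negating the size and
		 * shifting it into place yields the all-ones-above-the-range
		 * mask expected for a power-of-two, size-aligned region;
		 * 0x800 sets bit 11, the valid bit of MTRRphysMask.
		 */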
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7
	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MSR_MTRRcap, config, dummy);
	return config & (1 << 10);
}

int positive_have_wrcomb(void)
{
	return 1;
}

/*
 * Generic structure...
 */
const struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};