blob: 0625d4158e5824f32905b10eda7992edf194e2b0 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
2 because MTRRs can span upto 40 bits (36bits on most modern x86) */
3#include <linux/init.h>
4#include <linux/slab.h>
5#include <linux/mm.h>
Jan Beulich365bff82006-12-07 02:14:09 +01006#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007#include <asm/io.h>
8#include <asm/mtrr.h>
9#include <asm/msr.h>
10#include <asm/system.h>
11#include <asm/cpufeature.h>
Dave Jones7ebad702008-01-30 13:30:39 +010012#include <asm/processor-flags.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <asm/tlbflush.h>
venkatesh.pallipadi@intel.com2e5d9c82008-03-18 17:00:14 -070014#include <asm/pat.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include "mtrr.h"
16
/*
 * Software snapshot of this CPU's MTRR configuration, captured by
 * get_mtrr_state() and replayed by set_mtrr_state().
 */
struct mtrr_state {
	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;		/* MTRRdefType bits 11:10 — bit 0: fixed enable, bit 1: MTRR enable */
	unsigned char have_fixed;	/* MTRRcap bit 8: fixed-range MTRRs supported */
	mtrr_type def_type;		/* default memory type (MTRRdefType bits 7:0) */
};
24
/* A contiguous run of fixed-range MTRR MSRs sharing one block size */
struct fixed_range_block {
	int base_msr; /* start address of an MTRR block */
	int ranges;   /* number of MTRRs in this block */
};

/* The three architectural fixed-range blocks: 64K, 16K and 4K granularity */
static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
	{}	/* terminator: ranges == 0 ends the walk in set_fixed_ranges() */
};
36
Linus Torvalds1da177e2005-04-16 15:20:36 -070037static unsigned long smp_changes_mask;
38static struct mtrr_state mtrr_state = {};
venkatesh.pallipadi@intel.com2e5d9c82008-03-18 17:00:14 -070039static int mtrr_state_set;
Yinghai Lu35605a12008-03-24 16:02:01 -070040static u64 tom2;
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
Jan Beulich365bff82006-12-07 02:14:09 +010042#undef MODULE_PARAM_PREFIX
43#define MODULE_PARAM_PREFIX "mtrr."
44
Randy Dunlap25c16b92007-05-02 19:27:18 +020045static int mtrr_show;
Jan Beulich365bff82006-12-07 02:14:09 +010046module_param_named(show, mtrr_show, bool, 0);
47
/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive end, instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			/* 64K entries cover 0x00000-0x7FFFF (8 x 64K) */
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			/* 16K entries cover 0x80000-0xBFFFF, after the 8 64K slots */
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			/* 4K entries cover 0xC0000-0xFFFFF; the outer check already
			   guarantees start < 0x100000, so this bound is never the
			   limiting one (NOTE(review): literal looks like 0x100000
			   was intended — harmless as written) */
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look of multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state.enabled & 2)) {
		/* variable-range MTRRs disabled: everything is the default type */
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		/* mask_lo bit 11 is the valid bit; skip unused registers */
		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		/* Region partially inside this MTRR: caller must split it */
		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		/* MTRR precedence: UC wins over everything */
		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		/* WB + WT combine to WT */
		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		/* Any other conflicting overlap is treated as UC */
		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}

	/* AMD TOP_MEM2: RAM above 4GB and below TOM2 is write-back */
	if (tom2) {
		if (start >= (1ULL<<32) && (end < tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}
152
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	/* PHYSBASEn and PHYSMASKn for variable range @index */
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
160
Bernhard Kaindl2b3b4832007-05-02 19:27:17 +0200161static void
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162get_fixed_ranges(mtrr_type * frs)
163{
164 unsigned int *p = (unsigned int *) frs;
165 int i;
166
167 rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
168
169 for (i = 0; i < 2; i++)
170 rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
171 for (i = 0; i < 8; i++)
172 rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
173}
174
/* Snapshot the fixed-range MTRRs into mtrr_state (no-op without MTRR support).
   @info is unused; the signature matches the SMP call interface. */
void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}
180
Yinghai Lubf8c4812007-06-20 12:23:39 +0200181static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
Jan Beulich365bff82006-12-07 02:14:09 +0100182{
183 unsigned i;
184
185 for (i = 0; i < 8; ++i, ++types, base += step)
Randy Dunlap25c16b92007-05-02 19:27:18 +0200186 printk(KERN_INFO "MTRR %05X-%05X %s\n",
187 base, base + step - 1, mtrr_attrib_to_str(*types));
Jan Beulich365bff82006-12-07 02:14:09 +0100188}
189
venkatesh.pallipadi@intel.com2e5d9c82008-03-18 17:00:14 -0700190static void prepare_set(void);
191static void post_set(void);
192
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	/* MTRRcap bit 8 tells us whether fixed-range MTRRs exist */
	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	/* Default type and the two enable bits from MTRRdefType */
	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		tom2 = high;
		tom2 <<= 32;
		tom2 |= low;
		/* mask off bits outside the valid TOM2 address field */
		tom2 &= 0xffffff8000000ULL;
	}

	/* Optional boot-time dump of everything we just read (mtrr.show=1) */
	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
		/* number of hex digits needed for the physical-address high bits */
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
		if (tom2) {
			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
			       tom2, tom2>>20);
		}
	}
	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272/* Some BIOS's are fucked and don't set all MTRRs the same! */
273void __init mtrr_state_warn(void)
274{
275 unsigned long mask = smp_changes_mask;
276
277 if (!mask)
278 return;
279 if (mask & MTRR_CHANGE_MASK_FIXED)
280 printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
281 if (mask & MTRR_CHANGE_MASK_VARIABLE)
282 printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
283 if (mask & MTRR_CHANGE_MASK_DEFTYPE)
284 printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
285 printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
286 printk(KERN_INFO "mtrr: corrected configuration.\n");
287}
288
/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	/* wrmsr_safe() traps the #GP a bad MSR write would raise */
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}
299
/**
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
 * see AMD publication no. 24593, chapter 3.2.1 for more information
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	/* Read-modify-write SYSCFG: set the DRAM enable + modify bits */
	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
				| K8_MTRRFIXRANGE_DRAM_ENABLE
				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}
313
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 *
 * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	/* Only write (and possibly unlock the K8 IORR bits) when stale */
	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 15 &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}
338
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	/* An explicit replacement target short-circuits the search */
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	/* Otherwise the first register with zero size is free */
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}
363
/* Decode variable MTRR @reg into page-granular @base/@size and @type.
   A cleared valid bit yields base = size = type = 0 (free register). */
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. (NOTE(review): a non-contiguous mask would
	   be decoded incorrectly here — no SOURCE path guards that.) */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}
390
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 *
 * Returns nonzero if any MSR had to be rewritten.
 */
static int set_fixed_ranges(mtrr_type * frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block=-1, range;

	/* Walk the fixed_range_blocks[] table; terminator has ranges == 0.
	   @saved advances one 64-bit MSR image per register written. */
	while (fixed_range_blocks[++block].ranges)
	    for (range=0; range < fixed_range_blocks[block].ranges; range++)
		set_fixed_range(fixed_range_blocks[block].base_msr + range,
		    &changed, (unsigned int *) saved++);

	return changed;
}
408
/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	/* Compare only architecturally meaningful bits: type + address bits
	   within this CPU's physical-address width (size_and_mask). */
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	/* 0xfffff800 covers the valid bit (11) plus the low mask bits */
	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}
434
Jan Beulich365bff82006-12-07 02:14:09 +0100435static u32 deftype_lo, deftype_hi;
436
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		/* ~0xcff clears the default type and both enable bits first */
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
465
466
/* CR4 value saved by prepare_set() for restore in post_set(); statics need
   no explicit zero initializer (kernel style: don't initialise to 0). */
static unsigned long cr4;
/* Serializes the cache-disabled MTRR/PAT update sequence */
static DEFINE_SPINLOCK(set_atomicity_lock);
469
470/*
471 * Since we are disabling the cache don't allow any interrupts - they
472 * would run extremely slow and would only increase the pain. The caller must
473 * ensure that local interrupts are disabled and are reenabled after post_set()
474 * has been called.
475 */
476
/* Put this CPU into the no-fill cache state and disable MTRRs so they
   can be rewritten safely; undone by post_set(). The statement order
   below follows the architectural MTRR-update sequence — do not reorder. */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
507
Josh Triplett182daa52006-09-25 23:32:36 -0700508static void post_set(void) __releases(set_atomicity_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509{
510 /* Flush TLBs (no need to flush caches - they are disabled) */
511 __flush_tlb();
512
513 /* Intel (P6) standard MTRRs */
514 mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
515
516 /* Enable caches */
517 write_cr0(read_cr0() & 0xbfffffff);
518
519 /* Restore value of CR4 */
520 if ( cpu_has_pge )
521 write_cr4(cr4);
522 spin_unlock(&set_atomicity_lock);
523}
524
/* Replay the saved mtrr_state (and PAT) onto this CPU, recording which
   register classes differed into the global smp_changes_mask. */
static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}

}
550
/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	/* Keep the software copy in sync with what we write to the MSRs */
	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		/* @base/@size are in pages; 0x800 is the valid bit */
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
587
/* Validate a requested MTRR region (page-granular @base/@size) before it is
   added: enforces the PPro erratum restrictions and power-of-two alignment.
   Returns 0 if acceptable, -EINVAL otherwise. */
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1) ;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}
621
622
623static int generic_have_wrcomb(void)
624{
625 unsigned long config, dummy;
626 rdmsr(MTRRcap_MSR, config, dummy);
627 return (config & (1 << 10));
628}
629
/* Used by vendors whose CPUs always support write-combining */
int positive_have_wrcomb(void)
{
	return 1;
}
634
/* generic structure...
   Operations table for CPUs with the Intel-compatible MTRR interface. */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all	   = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};