/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 *
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in the Linux kernel
 *
 * Copyright 2000 Silicon Integrated System Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System Programming
 */

/*
 * 2005.1 yhlu: add NC support to spare MTRRs for 64G memory above installed
 * 2005.6 Eric: add address bit in x86_setup_mtrrs
 * 2005.6 yhlu: split x86_setup_var_mtrrs and x86_setup_fixed_mtrrs;
 *        AMD will not use x86_setup_fixed_mtrrs
 */
#include <stddef.h>
#include <console/console.h>
#include <device/device.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <arch/cpu.h>
#include <arch/acpi.h>
extern uint64_t uma_memory_base, uma_memory_size;
static unsigned int mtrr_msr[] = {
	MTRRfix64K_00000_MSR, MTRRfix16K_80000_MSR, MTRRfix16K_A0000_MSR,
	MTRRfix4K_C0000_MSR, MTRRfix4K_C8000_MSR, MTRRfix4K_D0000_MSR, MTRRfix4K_D8000_MSR,
	MTRRfix4K_E0000_MSR, MTRRfix4K_E8000_MSR, MTRRfix4K_F0000_MSR, MTRRfix4K_F8000_MSR,
};
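/*
 * Note on IA32_MTRRdefType (MSR 0x2ff), per the Intel SDM: bit 11 (E)
 * enables the MTRRs as a whole, bit 10 (FE) additionally enables the
 * fixed-range MTRRs, and bits 7:0 hold the default memory type used for
 * anything not covered by an MTRR. The two helpers below only OR in the
 * enable bits and leave the default type alone.
 */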
void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo |= 0xc00;	/* enable MTRRs and fixed-range MTRRs */
	wrmsr(MTRRdefType_MSR, msr);
}
static void enable_var_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo |= MTRRdefTypeEn;
	wrmsr(MTRRdefType_MSR, msr);
}
/* Setting a variable MTRR, comes from the Linux kernel source */
static void set_var_mtrr(
	unsigned int reg, unsigned long basek, unsigned long sizek,
	unsigned char type, unsigned address_bits)
{
	msr_t base, mask;
	unsigned address_mask_high;

	if (reg >= 8)
		return;

	// It is recommended that we disable and enable cache when we
	// do this.
	if (sizek == 0) {
		msr_t zero;

		disable_cache();

		zero.lo = zero.hi = 0;
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		wrmsr(MTRRphysMask_MSR(reg), zero);

		enable_cache();
		return;
	}

	address_mask_high = ((1u << (address_bits - 32u)) - 1u);

	base.hi = basek >> 22;
	base.lo = basek << 10;

	printk(BIOS_SPEW, "ADDRESS_MASK_HIGH=%#x\n", address_mask_high);

	if (sizek < 4*1024*1024) {
		mask.hi = address_mask_high;
		mask.lo = ~((sizek << 10) - 1);
	} else {
		mask.hi = address_mask_high & (~((sizek >> 22) - 1));
		mask.lo = 0;
	}

	// It is recommended that we disable and enable cache when we
	// do this.
	disable_cache();

	/* Bits 32-35 of MTRRphysMask should be set to 1 */
	base.lo |= type;
	mask.lo |= MTRRphysMaskValid;
	wrmsr(MTRRphysBase_MSR(reg), base);
	wrmsr(MTRRphysMask_MSR(reg), mask);

	enable_cache();
}
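/*
 * Worked example for set_var_mtrr() above (illustration only): a 64MB WB
 * range at base 0 on a CPU with 36 physical address bits (address_mask_high
 * = 0xf) is programmed as
 *	MTRRphysBase = 0x0000000000000006  (base 0, type 6 = write-back)
 *	MTRRphysMask = 0x0000000ffc000800  (mask covers bits 35:26, valid bit set)
 * so any physical address whose bits 35:26 are all zero falls into the range.
 */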
/* fms: find most significant bit set, stolen from Linux Kernel Source. */
static inline unsigned int fms(unsigned int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $0,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}
/* fls: find least significant bit set */
static inline unsigned int fls(unsigned int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $32,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}
/* Setting up variable and fixed MTRRs.
 *
 * From Intel Vol. III Section 9.12.4, the range size and base alignment have these requirements:
 *	1. The range size must be 2^N bytes for N >= 12 (i.e. 4KB minimum).
 *	2. The base address must be aligned to the range size, i.e. 2^N aligned for the same N.
 *	   So an 8K range must be 8K aligned, not merely 4K aligned.
 *
 * These requirements are met by decomposing the ramsize into Sum(Cn * 2^n, n = [0..N], Cn = [0, 1]).
 * For each Cm = 1, there is a WB range of size 2^m at base address Sum(Ck * 2^k, k = [m+1..N]).
 * A 124MB (128MB - 4MB SMA) example:
 *	ramsize = 124MB == 64MB (at 0MB) + 32MB (at 64MB) + 16MB (at 96MB) + 8MB (at 112MB) + 4MB (at 120MB).
 * But this wastes a lot of MTRR registers, so we use a more "aggressive" scheme with uncacheable regions.
 *
 * In the uncacheable-region scheme, we try to cover the whole ramsize with a single WB region where possible.
 * If (and only if) this cannot be done, we decompose the ramsize; the mathematical formula
 * becomes ramsize = Sum(Cn * 2^n, n = [0..N], Cn = [-1, 0, 1]). For Cn = -1, an uncacheable region is used.
 * The same 124MB example:
 *	ramsize = 124MB == 128MB WB (at 0MB) + 4MB UC (at 124MB)
 * or a 156MB (128MB + 32MB - 4MB SMA) example:
 *	ramsize = 156MB == 128MB WB (at 0MB) + 32MB WB (at 128MB) + 4MB UC (at 156MB)
 */
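/*
 * Illustrative sketch only; nothing below calls it, and the helper name is
 * ours. It walks the greedy WB decomposition described above for a base of 0,
 * using fms() to pick the largest power-of-two chunk that still fits. For
 * sizek = 124*1024 it prints the 64MB + 32MB + 16MB + 8MB + 4MB split from
 * the example.
 */
static inline void mtrr_decompose_example(unsigned long sizek)
{
	unsigned long basek = 0;

	while (sizek) {
		/* Largest 2^n KB chunk that fits in what is left */
		unsigned long chunkk = 1UL << fms(sizek);

		printk(BIOS_SPEW, "WB chunk: %luKB at %luKB\n", chunkk, basek);
		basek += chunkk;
		sizek -= chunkk;
	}
}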
/* 2 MTRRs are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS   2
#define MTRRS      (BIOS_MTRRS + OS_MTRRS)
static void set_fixed_mtrrs(unsigned int first, unsigned int last, unsigned char type)
{
	unsigned int i;
	unsigned int fixed_msr = NUM_FIXED_RANGES >> 3;
	msr_t msr;
	msr.lo = msr.hi = 0; /* Shut up gcc */
	for (i = first; i < last; i++) {
		/* When I switch to a new msr read it in */
		if (fixed_msr != i >> 3) {
			/* But first write out the old msr */
			if (fixed_msr < (NUM_FIXED_RANGES >> 3)) {
				disable_cache();
				wrmsr(mtrr_msr[fixed_msr], msr);
				enable_cache();
			}
			fixed_msr = i >> 3;
			msr = rdmsr(mtrr_msr[fixed_msr]);
		}
		if ((i & 7) < 4) {
			msr.lo &= ~(0xff << ((i & 3) * 8));
			msr.lo |= type << ((i & 3) * 8);
		} else {
			msr.hi &= ~(0xff << ((i & 3) * 8));
			msr.hi |= type << ((i & 3) * 8);
		}
	}
	/* Write out the final msr */
	if (fixed_msr < (NUM_FIXED_RANGES >> 3)) {
		disable_cache();
		wrmsr(mtrr_msr[fixed_msr], msr);
		enable_cache();
	}
}
static unsigned fixed_mtrr_index(unsigned long addrk)
{
	unsigned index;
	index = (addrk - 0) >> 6;
	if (index >= 8) {
		index = ((addrk - 8*64) >> 4) + 8;
	}
	if (index >= 24) {
		index = ((addrk - (8*64 + 16*16)) >> 2) + 24;
	}
	if (index > NUM_FIXED_RANGES) {
		index = NUM_FIXED_RANGES;
	}
	return index;
}
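/*
 * For example (illustration only): fixed_mtrr_index(0xA0000 >> 10) =
 * fixed_mtrr_index(640) = ((640 - 512) >> 4) + 8 = 16, i.e. the first 16KB
 * slot of MTRRfix16K_A0000 (mtrr_msr[16 >> 3] = mtrr_msr[2]), which indeed
 * starts at 0xA0000.
 */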
static unsigned int range_to_mtrr(unsigned int reg,
	unsigned long range_startk, unsigned long range_sizek,
	unsigned long next_range_startk, unsigned char type,
	unsigned int address_bits, unsigned int above4gb)
{
	if (!range_sizek) {
		/* If there's no MTRR hole, this function will bail out
		 * here when called for the hole.
		 */
		printk(BIOS_SPEW, "Zero-sized MTRR range @%ldKB\n", range_startk);
		return reg;
	}

	if (reg >= BIOS_MTRRS) {
		printk(BIOS_ERR, "Warning: Out of MTRRs for base: %4ldMB, range: %ldMB, type %s\n",
			range_startk >> 10, range_sizek >> 10,
			(type == MTRR_TYPE_UNCACHEABLE) ? "UC" :
			    ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"));
		return reg;
	}

	while (range_sizek) {
		unsigned long max_align, align;
		unsigned long sizek;
		/* Compute the maximum size I can make a range */
		max_align = fls(range_startk);
		align = fms(range_sizek);
		if (align > max_align) {
			align = max_align;
		}
		sizek = 1 << align;
		printk(BIOS_DEBUG, "Setting variable MTRR %d, base: %4ldMB, range: %4ldMB, type %s\n",
			reg, range_startk >> 10, sizek >> 10,
			(type == MTRR_TYPE_UNCACHEABLE) ? "UC" :
			    ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")
			);

		/* If the range is above 4GB, an MTRR is needed
		 * only if the above4gb flag is set.
		 */
		if (range_startk < 0x100000000ull / 1024 || above4gb)
			set_var_mtrr(reg++, range_startk, sizek, type, address_bits);
		range_startk += sizek;
		range_sizek -= sizek;
		if (reg >= BIOS_MTRRS) {
			printk(BIOS_ERR, "Running out of variable MTRRs!\n");
			break;
		}
	}
	return reg;
}
static unsigned long resk(uint64_t value)
{
	unsigned long resultk;
	if (value < (1ULL << 42)) {
		resultk = value >> 10;
	} else {
		resultk = 0xffffffff;
	}
	return resultk;
}
static void set_fixed_mtrr_resource(void *gp, struct device *dev, struct resource *res)
{
	unsigned int start_mtrr;
	unsigned int last_mtrr;
	start_mtrr = fixed_mtrr_index(resk(res->base));
	last_mtrr = fixed_mtrr_index(resk((res->base + res->size)));
	if (start_mtrr >= NUM_FIXED_RANGES) {
		return;
	}
	printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: WB\n",
		start_mtrr, last_mtrr);
	set_fixed_mtrrs(start_mtrr, last_mtrr, MTRR_TYPE_WRBACK);
}
#ifndef CONFIG_VAR_MTRR_HOLE
#define CONFIG_VAR_MTRR_HOLE 1
#endif

struct var_mtrr_state {
	unsigned long range_startk, range_sizek;
	unsigned int reg;
	unsigned long hole_startk, hole_sizek;
	unsigned int address_bits;
	unsigned int above4gb; /* Set if MTRRs are needed for DRAM above 4GB */
};
void set_var_mtrr_resource(void *gp, struct device *dev, struct resource *res)
{
	struct var_mtrr_state *state = gp;
	unsigned long basek, sizek;
	if (state->reg >= BIOS_MTRRS)
		return;
	basek = resk(res->base);
	sizek = resk(res->size);
	/* See if I can merge with the last range.
	 * Either I am below 1M and the fixed MTRRs handle it, or
	 * the ranges touch.
	 */
	if ((basek <= 1024) || (state->range_startk + state->range_sizek == basek)) {
		unsigned long endk = basek + sizek;
		state->range_sizek = endk - state->range_startk;
		return;
	}
	/* Write the range mtrrs */
	if (state->range_sizek != 0) {
#if CONFIG_VAR_MTRR_HOLE
		if (state->hole_sizek == 0) {
			/* We need to put that on to hole */
			unsigned long endk = basek + sizek;
			state->hole_startk = state->range_startk + state->range_sizek;
			state->hole_sizek = basek - state->hole_startk;
			state->range_sizek = endk - state->range_startk;
		}
#endif
		state->reg = range_to_mtrr(state->reg, state->range_startk,
			state->range_sizek, basek, MTRR_TYPE_WRBACK,
			state->address_bits, state->above4gb);
#if CONFIG_VAR_MTRR_HOLE
		state->reg = range_to_mtrr(state->reg, state->hole_startk,
			state->hole_sizek, basek, MTRR_TYPE_UNCACHEABLE,
			state->address_bits, state->above4gb);
#endif
		state->range_startk = 0;
		state->range_sizek = 0;
		state->hole_startk = 0;
		state->hole_sizek = 0;
	}
	/* Allocate an msr */
	printk(BIOS_SPEW, " Allocate an msr - basek = %08lx, sizek = %08lx,\n", basek, sizek);
	state->range_startk = basek;
	state->range_sizek = sizek;
}
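/*
 * For example (illustration only): cacheable resources at 0K-640K and
 * 1024K-524288K are merged into a single 0K-524288K write-back range,
 * because the first one lies at or below 1MB where the fixed MTRRs control
 * cacheability. Only when a later resource neither lies below 1MB nor
 * touches the accumulated range does the hole/flush path above run.
 */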
void x86_setup_fixed_mtrrs(void)
{
	/* Try this the simple way of incrementally adding together
	 * MTRRs. If this doesn't work out we can get smart again
	 * and clear out the MTRRs.
	 */

	printk(BIOS_DEBUG, "\n");
	/* Initialize the fixed MTRRs to uncached */
	printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: UC\n",
		0, NUM_FIXED_RANGES);
	set_fixed_mtrrs(0, NUM_FIXED_RANGES, MTRR_TYPE_UNCACHEABLE);

	/* Now see which of the fixed MTRRs cover RAM. */
	search_global_resources(
		IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE,
		set_fixed_mtrr_resource, NULL);
	printk(BIOS_DEBUG, "DONE fixed MTRRs\n");

	/* Enable fixed MTRRs */
	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}
/* This routine needs to know how many address bits a given processor
 * supports. CPUs get grumpy when you set too many bits in
 * their MTRR registers :( I would generically call cpuid here
 * and find out how many bits are physically supported, but some CPUs are
 * buggy and report more bits than they actually support.
 * If the above4gb flag is set, variable MTRR ranges must be used to
 * set cacheability of DRAM above 4GB. If the above4gb flag is clear,
 * some other mechanism is controlling cacheability of DRAM above 4GB.
 */
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	/* Try this the simple way of incrementally adding together
	 * MTRRs. If this doesn't work out we can get smart again
	 * and clear out the MTRRs.
	 */
	struct var_mtrr_state var_state;

	/* Cache as many memory areas as possible */
	/* FIXME is there an algorithm for computing the optimal set of MTRRs?
	 * In some cases it is definitely possible to do better.
	 */
	var_state.range_startk = 0;
	var_state.range_sizek = 0;
	var_state.hole_startk = 0;
	var_state.hole_sizek = 0;
	var_state.reg = 0;
	var_state.address_bits = address_bits;
	var_state.above4gb = above4gb;

	search_global_resources(
		IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE,
		set_var_mtrr_resource, &var_state);

#if (CONFIG_GFXUMA == 1) /* UMA or SP. */
	/* For now we assume the UMA space is at the end of memory below 4GB */
	if (var_state.hole_startk || var_state.hole_sizek) {
		printk(BIOS_DEBUG, "Warning: Can't set up MTRR hole for UMA due to pre-existing MTRR hole.\n");
	} else {
#if CONFIG_VAR_MTRR_HOLE
		// Increase the base range and set up UMA as an UC hole instead
		var_state.range_sizek += (uma_memory_size >> 10);

		var_state.hole_startk = (uma_memory_base >> 10);
		var_state.hole_sizek = (uma_memory_size >> 10);
#endif
	}
#endif

	/* Write the last range */
	var_state.reg = range_to_mtrr(var_state.reg, var_state.range_startk,
		var_state.range_sizek, 0, MTRR_TYPE_WRBACK,
		var_state.address_bits, var_state.above4gb);
#if CONFIG_VAR_MTRR_HOLE
	var_state.reg = range_to_mtrr(var_state.reg, var_state.hole_startk,
		var_state.hole_sizek, 0, MTRR_TYPE_UNCACHEABLE,
		var_state.address_bits, var_state.above4gb);
#endif

	printk(BIOS_DEBUG, "DONE variable MTRRs\n");
	printk(BIOS_DEBUG, "Clear out the extra MTRRs\n");
	/* Clear out the extra MTRRs */
	while (var_state.reg < MTRRS) {
		set_var_mtrr(var_state.reg++, 0, 0, 0, var_state.address_bits);
	}

	/* Enable caching and speculative reads for the
	 * complete ROM now that we actually have RAM.
	 */
	if (boot_cpu() && (acpi_slp_type != 3)) {
		set_var_mtrr(7, (4096-4)*1024, 4*1024,
			MTRR_TYPE_WRPROT, address_bits);
	}

	printk(BIOS_SPEW, "call enable_var_mtrr()\n");
	enable_var_mtrr();
	printk(BIOS_SPEW, "Leave %s\n", __func__);
}
void x86_setup_mtrrs(void)
{
	int address_size;
	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "CPU physical address size: %d bits\n", address_size);
	x86_setup_var_mtrrs(address_size, 1);
}
int x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;
	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo >>= 10;	/* bit 0 is now FE (fixed-range enable), bit 1 is E (MTRR enable) */

	printk(BIOS_DEBUG, "Fixed MTRRs   : ");
	if (msr.lo & 0x01)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & 0x02)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");

	return ((int) msr.lo);
}