#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
+#include <cpu/x86/lapic.h>
+#include <arch/cpu.h>
+#include <arch/acpi.h>
#if CONFIG_GFXUMA
extern uint64_t uma_memory_base, uma_memory_size;
MTRRfix4K_E0000_MSR, MTRRfix4K_E8000_MSR, MTRRfix4K_F0000_MSR, MTRRfix4K_F8000_MSR,
};
-
void enable_fixed_mtrr(void)
{
	msr_t msr;
msr = rdmsr(MTRRdefType_MSR);
- msr.lo |= 0x800;
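+	/* Bit 11 (E) of IA32_MTRR_DEF_TYPE globally enables the MTRRs. */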
+ msr.lo |= MTRRdefTypeEn;
wrmsr(MTRRdefType_MSR, msr);
}
/* Set a variable MTRR; adapted from the Linux kernel source. */
static void set_var_mtrr(
- unsigned int reg, unsigned long basek, unsigned long sizek,
+ unsigned int reg, unsigned long basek, unsigned long sizek,
unsigned char type, unsigned address_bits)
{
msr_t base, mask;
	// it is recommended that we disable and enable cache when we
	// do this.
if (sizek == 0) {
disable_cache();
-
+
msr_t zero;
zero.lo = zero.hi = 0;
	/* The invalid bit is kept in the mask, so we simply clear the
	   relevant mask register to disable a range. */
mask.lo = 0;
}
- // it is recommended that we disable and enable cache when we
- // do this.
+ // it is recommended that we disable and enable cache when we
+ // do this.
disable_cache();
	/* Bits 32-35 of MTRRphysMask should be set to 1 */
base.lo |= type;
- mask.lo |= 0x800;
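+	/* Bit 11 (V) of IA32_MTRR_PHYSMASKn marks this variable range as valid. */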
+ mask.lo |= MTRRphysMaskValid;
wrmsr (MTRRphysBase_MSR(reg), base);
wrmsr (MTRRphysMask_MSR(reg), mask);
	enable_cache();
}
-static unsigned int range_to_mtrr(unsigned int reg,
+static unsigned int range_to_mtrr(unsigned int reg,
unsigned long range_startk, unsigned long range_sizek,
- unsigned long next_range_startk, unsigned char type, unsigned address_bits)
+ unsigned long next_range_startk, unsigned char type,
+ unsigned int address_bits, unsigned int above4gb)
{
if (!range_sizek) {
		/* If there's no MTRR hole, this function will bail out
		 * here when called for the hole.
		 */
unsigned long sizek;
/* Compute the maximum size I can make a range */
max_align = fls(range_startk);
- align = fms(range_sizek);
+ align = fms(range_sizek);
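+	/* fls() returns the index of the lowest set bit of the start (its
+	 * natural alignment); fms() the index of the highest set bit of the
+	 * size. Each MTRR chunk must be a power of two bounded by both.
+	 */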
if (align > max_align) {
align = max_align;
}
(type==MTRR_TYPE_UNCACHEABLE)?"UC":
((type==MTRR_TYPE_WRBACK)?"WB":"Other")
);
- set_var_mtrr(reg++, range_startk, sizek, type, address_bits);
+
+ /* if range is above 4GB, MTRR is needed
+ * only if above4gb flag is set
+ */
+ if (range_startk < 0x100000000ull / 1024 || above4gb)
+ set_var_mtrr(reg++, range_startk, sizek, type, address_bits);
range_startk += sizek;
range_sizek -= sizek;
if (reg >= BIOS_MTRRS) {
return reg;
}
-static unsigned long resk(uint64_t value)
+static unsigned long resk(uint64_t value)
{
unsigned long resultk;
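	/* Convert a byte count to KB; anything at or above 2^42 bytes is
	 * clamped so the result still fits in a 32-bit unsigned long. */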
if (value < (1ULL << 42)) {
printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: WB\n",
start_mtrr, last_mtrr);
set_fixed_mtrrs(start_mtrr, last_mtrr, MTRR_TYPE_WRBACK);
-
+
}
#ifndef CONFIG_VAR_MTRR_HOLE
#define CONFIG_VAR_MTRR_HOLE 1
#endif
struct var_mtrr_state {
unsigned long range_startk, range_sizek;
unsigned int reg;
-#if CONFIG_VAR_MTRR_HOLE
unsigned long hole_startk, hole_sizek;
-#endif
- unsigned address_bits;
+ unsigned int address_bits;
+ unsigned int above4gb; /* Set if MTRRs are needed for DRAM above 4GB */
};
void set_var_mtrr_resource(void *gp, struct device *dev, struct resource *res)
return;
}
#endif
- state->reg = range_to_mtrr(state->reg, state->range_startk,
- state->range_sizek, basek, MTRR_TYPE_WRBACK, state->address_bits);
+ state->reg = range_to_mtrr(state->reg, state->range_startk,
+ state->range_sizek, basek, MTRR_TYPE_WRBACK,
+ state->address_bits, state->above4gb);
#if CONFIG_VAR_MTRR_HOLE
- state->reg = range_to_mtrr(state->reg, state->hole_startk,
- state->hole_sizek, basek, MTRR_TYPE_UNCACHEABLE, state->address_bits);
+ state->reg = range_to_mtrr(state->reg, state->hole_startk,
+ state->hole_sizek, basek, MTRR_TYPE_UNCACHEABLE,
+ state->address_bits, state->above4gb);
#endif
state->range_startk = 0;
state->range_sizek = 0;
-#if CONFIG_VAR_MTRR_HOLE
- state->hole_startk = 0;
- state->hole_sizek = 0;
-#endif
+ state->hole_startk = 0;
+ state->hole_sizek = 0;
}
- /* Allocate an msr */
+ /* Allocate an msr */
printk(BIOS_SPEW, " Allocate an msr - basek = %08lx, sizek = %08lx,\n", basek, sizek);
state->range_startk = basek;
state->range_sizek = sizek;
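	/* Record the new range; a contiguous next resource will be merged
	 * into it before any MTRR is actually written. */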
void x86_setup_fixed_mtrrs(void)
{
/* Try this the simple way of incrementally adding together
- * mtrrs. If this doesn't work out we can get smart again
+ * mtrrs. If this doesn't work out we can get smart again
* and clear out the mtrrs.
*/
}
-void x86_setup_var_mtrrs(unsigned address_bits)
+void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
/* this routine needs to know how many address bits a given processor
- * supports. CPUs get grumpy when you set too many bits in
+ * supports. CPUs get grumpy when you set too many bits in
* their mtrr registers :( I would generically call cpuid here
 * and find out how many are physically supported, but some CPUs are
 * buggy, and report more bits than they actually support.
+ * If the above4gb flag is set, variable MTRR ranges must be used to
+ * set the cacheability of DRAM above 4GB. If the above4gb flag is clear,
+ * some other mechanism is controlling the cacheability of DRAM above 4GB.
*/
{
/* Try this the simple way of incrementally adding together
- * mtrrs. If this doesn't work out we can get smart again
+ * mtrrs. If this doesn't work out we can get smart again
* and clear out the mtrrs.
*/
struct var_mtrr_state var_state;
/* Cache as many memory areas as possible */
- /* FIXME is there an algorithm for computing the optimal set of mtrrs?
+ /* FIXME is there an algorithm for computing the optimal set of mtrrs?
* In some cases it is definitely possible to do better.
*/
var_state.range_startk = 0;
var_state.range_sizek = 0;
-#if CONFIG_VAR_MTRR_HOLE
var_state.hole_startk = 0;
var_state.hole_sizek = 0;
-#endif
var_state.reg = 0;
var_state.address_bits = address_bits;
+ var_state.above4gb = above4gb;
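+	/* Walk all cacheable memory resources; set_var_mtrr_resource()
+	 * coalesces contiguous ranges before writing MTRRs. */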
search_global_resources(
IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE,
set_var_mtrr_resource, &var_state);
+
#if (CONFIG_GFXUMA == 1) /* UMA or SP. */
- // For now we assume the UMA space is at the end of memory
+ /* For now we assume the UMA space is at the end of memory below 4GB */
if (var_state.hole_startk || var_state.hole_sizek) {
printk(BIOS_DEBUG, "Warning: Can't set up MTRR hole for UMA due to pre-existing MTRR hole.\n");
} else {
+#if CONFIG_VAR_MTRR_HOLE
// Increase the base range and set up UMA as an UC hole instead
var_state.range_sizek += (uma_memory_size >> 10);
var_state.hole_startk = (uma_memory_base >> 10);
var_state.hole_sizek = (uma_memory_size >> 10);
+#endif
}
#endif
/* Write the last range */
- var_state.reg = range_to_mtrr(var_state.reg, var_state.range_startk,
- var_state.range_sizek, 0, MTRR_TYPE_WRBACK, var_state.address_bits);
+ var_state.reg = range_to_mtrr(var_state.reg, var_state.range_startk,
+ var_state.range_sizek, 0, MTRR_TYPE_WRBACK,
+ var_state.address_bits, var_state.above4gb);
#if CONFIG_VAR_MTRR_HOLE
var_state.reg = range_to_mtrr(var_state.reg, var_state.hole_startk,
- var_state.hole_sizek, 0, MTRR_TYPE_UNCACHEABLE, var_state.address_bits);
+ var_state.hole_sizek, 0, MTRR_TYPE_UNCACHEABLE,
+ var_state.address_bits, var_state.above4gb);
#endif
printk(BIOS_DEBUG, "DONE variable MTRRs\n");
printk(BIOS_DEBUG, "Clear out the extra MTRR's\n");
	while (var_state.reg < MTRRS) {
set_var_mtrr(var_state.reg++, 0, 0, 0, var_state.address_bits);
}
+
+#if CONFIG_CACHE_ROM
+ /* Enable Caching and speculative Reads for the
+ * complete ROM now that we actually have RAM.
+ */
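+	/* MTRR 7 covers the top 4MB below 4GB (0xFFC00000-0xFFFFFFFF),
+	 * where the boot ROM is mapped, as write-protect. boot_cpu()
+	 * restricts this to the BSP, and acpi_slp_type == 3 (S3 resume)
+	 * skips it.
+	 */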
+ if (boot_cpu() && (acpi_slp_type != 3)) {
+ set_var_mtrr(7, (4096-4)*1024, 4*1024,
+ MTRR_TYPE_WRPROT, address_bits);
+ }
+#endif
+
printk(BIOS_SPEW, "call enable_var_mtrr()\n");
enable_var_mtrr();
printk(BIOS_SPEW, "Leave %s\n", __func__);
post_code(0x6A);
}
-void x86_setup_mtrrs(unsigned address_bits)
+
+void x86_setup_mtrrs(void)
{
+ int address_size;
x86_setup_fixed_mtrrs();
- x86_setup_var_mtrrs(address_bits);
+ address_size = cpu_phys_address_size();
+ printk(BIOS_DEBUG, "CPU physical address size: %d bits\n", address_size);
+ x86_setup_var_mtrrs(address_size, 1);
}