3 #include <cpu/x86/cache.h>
4 #include <cpu/x86/mtrr.h>
5 #include <cpu/x86/msr.h>
7 /* Validate CONFIG_XIP_ROM_SIZE and CONFIG_XIP_ROM_BASE */
8 #if defined(CONFIG_XIP_ROM_SIZE) && !defined(CONFIG_XIP_ROM_BASE)
9 # error "CONFIG_XIP_ROM_SIZE without CONFIG_XIP_ROM_BASE"
11 #if defined(CONFIG_XIP_ROM_BASE) && !defined(CONFIG_XIP_ROM_SIZE)
12 # error "CONFIG_XIP_ROM_BASE without CONFIG_XIP_ROM_SIZE"
14 #if !defined(CONFIG_LB_MEM_TOPK)
15 # error "CONFIG_LB_MEM_TOPK not defined"
18 #if defined(CONFIG_XIP_ROM_SIZE) && ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE -1)) != 0)
19 # error "CONFIG_XIP_ROM_SIZE is not a power of 2"
21 #if defined(CONFIG_XIP_ROM_SIZE) && ((CONFIG_XIP_ROM_BASE % CONFIG_XIP_ROM_SIZE) != 0)
22 # error "CONFIG_XIP_ROM_BASE is not a multiple of CONFIG_XIP_ROM_SIZE"
25 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
26 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
29 static void disable_var_mtrr(unsigned reg)
31 /* The invalid bit is kept in the mask so we simply
32 * clear the relevent mask register to disable a
36 zero.lo = zero.hi = 0;
37 wrmsr(MTRRphysMask_MSR(reg), zero);
40 static void set_var_mtrr(
41 unsigned reg, unsigned base, unsigned size, unsigned type)
44 /* Bit Bit 32-35 of MTRRphysMask should be set to 1 */
45 /* FIXME: It only support 4G less range */
47 basem.lo = base | type;
49 wrmsr(MTRRphysBase_MSR(reg), basem);
50 maskm.lo = ~(size - 1) | 0x800;
51 maskm.hi = (1<<(CONFIG_CPU_ADDR_BITS-32))-1;
52 wrmsr(MTRRphysMask_MSR(reg), maskm);
55 static void set_var_mtrr_x(
56 unsigned reg, uint32_t base_lo, uint32_t base_hi, uint32_t size_lo, uint32_t size_hi, unsigned type)
59 /* Bit Bit 32-35 of MTRRphysMask should be set to 1 */
61 basem.lo = (base_lo & 0xfffff000) | type;
62 basem.hi = base_hi & ((1<<(CONFIG_CPU_ADDR_BITS-32))-1);
63 wrmsr(MTRRphysBase_MSR(reg), basem);
64 maskm.hi = (1<<(CONFIG_CPU_ADDR_BITS-32))-1;
66 maskm.lo = ~(size_lo - 1) | 0x800;
69 maskm.hi &= ~(size_hi - 1);
71 wrmsr(MTRRphysMask_MSR(reg), maskm);
74 static void cache_lbmem(int type)
76 /* Enable caching for 0 - 1MB using variable mtrr */
78 set_var_mtrr(0, 0x00000000, CONFIG_LB_MEM_TOPK << 10, type);
82 /* the fixed and variable MTTRs are power-up with random values,
83 * clear them to MTRR_TYPE_UNCACHEABLE for safty.
85 static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
88 * The cache is not enabled in cr0 nor in MTRRdefType_MSR
89 * entry32.inc ensures the cache is not enabled in cr0
92 const unsigned long *msr_addr;
94 /* Inialize all of the relevant msrs to 0 */
98 for(msr_addr = mtrr_msrs; (msr_nr = *msr_addr); msr_addr++) {
102 #if defined(CONFIG_XIP_ROM_SIZE)
103 /* enable write through caching so we can do execute in place
106 set_var_mtrr(1, CONFIG_XIP_ROM_BASE, CONFIG_XIP_ROM_SIZE, MTRR_TYPE_WRBACK);
109 /* Set the default memory type and enable fixed and variable MTRRs
111 /* Enable Variable MTRRs */
114 wrmsr(MTRRdefType_MSR, msr);
/* Clear every fixed and variable MTRR to a known (uncacheable) state
 * and enable variable MTRRs. The MSR table below lists the fixed MTRR
 * MSRs followed by the eight variable base/mask pairs, terminated by 0.
 */
static void early_mtrr_init(void)
{
	static const unsigned long mtrr_msrs[] = {
		/* fixed mtrr */
		0x250, 0x258, 0x259,
		0x268, 0x269, 0x26A,
		0x26B, 0x26C, 0x26D,
		0x26E, 0x26F,
		/* var mtrr */
		0x200, 0x201, 0x202, 0x203,
		0x204, 0x205, 0x206, 0x207,
		0x208, 0x209, 0x20A, 0x20B,
		0x20C, 0x20D, 0x20E, 0x20F,
		/* NULL end of table */
		0
	};
	disable_cache();
	do_early_mtrr_init(mtrr_msrs);
	enable_cache();
}
139 static int early_mtrr_init_detected(void)
142 /* See if MTRR's are enabled.
143 * a #RESET disables them while an #INIT
144 * preserves their state. This works
145 * on both Intel and AMD cpus, at least
146 * according to the documentation.
148 msr = rdmsr(MTRRdefType_MSR);
149 return msr.lo & 0x00000800;
152 #endif /* EARLYMTRR_C */