1 #include <console/console.h>
2 #include <device/device.h>
3 #include <cpu/x86/mtrr.h>
4 #include <cpu/amd/mtrr.h>
5 #include <cpu/x86/cache.h>
6 #include <cpu/x86/msr.h>
/*
 * Convert a 64-bit byte address/size into a count of KiB.
 *
 * Values at or above 1 << 42 bytes (4 TiB) would not fit the KiB count
 * into the 32-bit arithmetic used elsewhere in this file, so they are
 * saturated to 0xffffffff KiB.
 */
static unsigned long resk(uint64_t value)
{
	unsigned long resultk;
	if (value < (1ULL << 42)) {
		resultk = value >> 10;	/* bytes -> KiB */
	} else {
		/* Saturate: too large to represent as KiB here. */
		resultk = 0xffffffff;
	}
	return resultk;
}
21 static unsigned fixed_mtrr_index(unsigned long addrk)
24 index = (addrk - 0) >> 6;
26 index = ((addrk - 8*64) >> 4) + 8;
29 index = ((addrk - (8*64 + 16*16)) >> 2) + 24;
31 if (index > NUM_FIXED_RANGES) {
32 index = NUM_FIXED_RANGES;
38 static unsigned int mtrr_msr[] = {
39 MTRRfix64K_00000_MSR, MTRRfix16K_80000_MSR, MTRRfix16K_A0000_MSR,
40 MTRRfix4K_C0000_MSR, MTRRfix4K_C8000_MSR, MTRRfix4K_D0000_MSR, MTRRfix4K_D8000_MSR,
41 MTRRfix4K_E0000_MSR, MTRRfix4K_E8000_MSR, MTRRfix4K_F0000_MSR, MTRRfix4K_F8000_MSR,
44 static void set_fixed_mtrrs(unsigned int first, unsigned int last, unsigned char type)
47 unsigned int fixed_msr = NUM_FIXED_RANGES >> 3;
49 msr.lo = msr.hi = 0; /* Shut up gcc */
50 for (i = first; i < last; i++) {
51 /* When I switch to a new msr read it in */
52 if (fixed_msr != i >> 3) {
53 /* But first write out the old msr */
54 if (fixed_msr < (NUM_FIXED_RANGES >> 3)) {
56 wrmsr(mtrr_msr[fixed_msr], msr);
60 msr = rdmsr(mtrr_msr[fixed_msr]);
63 msr.lo &= ~(0xff << ((i&3)*8));
64 msr.lo |= type << ((i&3)*8);
66 msr.hi &= ~(0xff << ((i&3)*8));
67 msr.hi |= type << ((i&3)*8);
70 /* Write out the final msr */
71 if (fixed_msr < (NUM_FIXED_RANGES >> 3)) {
73 wrmsr(mtrr_msr[fixed_msr], msr);
/*
 * amd_setup_mtrrs(): program the AMD fixed-range MTRRs, TOP_MEM /
 * TOP_MEM2 and the SYSCFG DRAM-enable bits from the device resource
 * map.
 *
 * NOTE(review): this listing is a fragment -- the embedded original
 * line numbers (left column) show gaps, so declarations, braces and
 * several statements are missing from view.  Comments below describe
 * only what is visible; the definition also continues past the end of
 * this excerpt.
 */
80 void amd_setup_mtrrs(void)
82 unsigned long mmio_basek, tomk;
/* Step 1: set MtrrFixDramModEn in SYSCFG so the AMD-specific
 * RdMem/WrMem extension bits of the fixed MTRRs become writable. */
87 /* Enable the access to AMD RdDram and WrDram extension bits */
88 msr = rdmsr(SYSCFG_MSR);
89 msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
90 wrmsr(SYSCFG_MSR, msr);
/* Step 2: default every fixed range to uncacheable, then mark the
 * cacheable RAM ranges write-back below. */
93 /* Initialized the fixed_mtrrs to uncached */
94 printk_debug("Setting fixed MTRRs(%d-%d) type: UC\n",
96 set_fixed_mtrrs(0, NUM_FIXED_RANGES, MTRR_TYPE_UNCACHEABLE);
98 /* Except for the PCI MMIO hole just before 4GB there are no
99 * significant holes in the address space, so just account
100 * for those two and move on.
/* Step 3: walk every resource of every device, tracking (in KiB)
 * the top of memory (tomk) and the base of the sub-4GB MMIO hole
 * (mmio_basek) -- presumably updated in the lines missing from this
 * excerpt. */
102 mmio_basek = tomk = 0;
103 for(dev = all_devices; dev; dev = dev->next) {
104 struct resource *res, *last;
105 last = &dev->resource[dev->resources];
106 for(res = &dev->resource[0]; res < last; res++) {
108 unsigned long start_mtrr, last_mtrr;
/* Skip anything that is not cacheable memory. */
109 if (!(res->flags & IORESOURCE_MEM) ||
110 (!(res->flags & IORESOURCE_CACHEABLE))) {
113 topk = resk(res->base + res->size);
/* 4*1024*1024 KiB == 4 GiB: only resources below 4 GiB can move
 * the MMIO-hole base. */
117 if ((topk < 4*1024*1024) && (mmio_basek < topk)) {
/* Mark this resource's fixed-MTRR slots (first MiB only) as
 * write-back with the AMD RdMem/WrMem bits set. */
121 start_mtrr = fixed_mtrr_index(resk(res->base));
122 last_mtrr = fixed_mtrr_index(resk(res->base + res->size));
123 if (start_mtrr >= NUM_FIXED_RANGES) {
126 printk_debug("Setting fixed MTRRs(%d-%d) Type: WB\n",
127 start_mtrr, last_mtrr);
128 set_fixed_mtrrs(start_mtrr, last_mtrr, MTRR_TYPE_WRBACK | MTRR_READ_MEM | MTRR_WRITE_MEM);
131 printk_debug("DONE fixed MTRRs\n");
132 if (mmio_basek > tomk) {
/* Step 4: align both values to the granularity TOP_MEM can hold. */
135 /* Round mmio_basek down to the nearest size that will fit in TOP_MEM */
136 mmio_basek = mmio_basek & ~TOP_MEM_MASK_KB;
137 /* Round tomk up to the next greater size that will fit in TOP_MEM */
138 tomk = (tomk + TOP_MEM_MASK_KB) & ~TOP_MEM_MASK_KB;
/* KiB -> byte address split across the 64-bit MSR halves:
 * hi = KiB >> 22 (bits 63:32), lo = KiB << 10 (bits 31:0). */
143 msr.hi = mmio_basek >> 22;
144 msr.lo = mmio_basek << 10;
/* NOTE(review): the TOP_MEM write and the value written to TOP_MEM2
 * are in lines missing from this excerpt. */
150 wrmsr(TOP_MEM2, msr);
152 /* zero the IORR's before we enable to prevent
153 * undefined side effects.
156 for(i = IORR_FIRST; i <= IORR_LAST; i++) {
/* Step 5: enable variable MTRRs and the fixed-MTRR RdMem/WrMem
 * semantics, and lock the extension bits again by clearing
 * MtrrFixDramModEn. */
160 /* Enable Variable Mtrrs
161 * Enable the RdMem and WrMem bits in the fixed mtrrs.
162 * Disable access to the RdMem and WrMem in the fixed mtrr.
164 msr = rdmsr(SYSCFG_MSR);
165 msr.lo |= SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_TOM2En;
166 msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
167 wrmsr(SYSCFG_MSR, msr);
/* Continues past this excerpt: generic variable-MTRR setup. */
171 /* Now that I have mapped what is memory and what is not
172 * Setup the mtrrs so we can cache the memory.