1 /* Needed so the AMD K8 runs correctly. */
2 #include <console/console.h>
4 #include <cpu/p6/msr.h>
5 #include <cpu/k8/mtrr.h>
6 #include <device/device.h>
7 #include <device/chip.h>
8 #include <device/device.h>
9 #include <device/pci.h>
10 #include <smp/start_stop.h>
12 #include <cpu/p6/msr.h>
13 #include <cpu/p6/pgtbl.h>
14 #include <pc80/mc146818rtc.h>
15 #include <arch/smp/lapic.h>
16 #include "../../northbridge/amd/amdk8/amdk8.h"
17 #include "../../northbridge/amd/amdk8/cpu_rev.c"
20 #define MCI_STATUS 0x401
22 static inline void disable_cache(void)
26 /* Write back the cache */
29 "orl $0x40000000, %0\n\t"
37 static inline void enable_cache(void)
40 // turn cache back on.
43 "andl $0x9fffffff, %0\n\t"
49 static inline msr_t rdmsr_amd(unsigned index)
52 __asm__ __volatile__ (
54 : "=a" (result.lo), "=d" (result.hi)
55 : "c" (index), "D" (0x9c5a203a)
60 static inline void wrmsr_amd(unsigned index, msr_t msr)
62 __asm__ __volatile__ (
65 : "c" (index), "a" (msr.lo), "d" (msr.hi), "D" (0x9c5a203a)
72 #define ZERO_CHUNK_KB 0x800UL /* 2M */
73 #define TOLM_KB 0x400000UL
80 struct mtrr mtrrs[MTRR_COUNT];
81 msr_t top_mem, top_mem2;
85 static void save_mtrr_state(struct mtrr_state *state)
88 for(i = 0; i < MTRR_COUNT; i++) {
89 state->mtrrs[i].base = rdmsr(MTRRphysBase_MSR(i));
90 state->mtrrs[i].mask = rdmsr(MTRRphysMask_MSR(i));
92 state->top_mem = rdmsr(TOP_MEM);
93 state->top_mem2 = rdmsr(TOP_MEM2);
94 state->def_type = rdmsr(MTRRdefType_MSR);
97 static void restore_mtrr_state(struct mtrr_state *state)
102 for(i = 0; i < MTRR_COUNT; i++) {
103 wrmsr(MTRRphysBase_MSR(i), state->mtrrs[i].base);
104 wrmsr(MTRRphysMask_MSR(i), state->mtrrs[i].mask);
106 wrmsr(TOP_MEM, state->top_mem);
107 wrmsr(TOP_MEM2, state->top_mem2);
108 wrmsr(MTRRdefType_MSR, state->def_type);
115 static void print_mtrr_state(struct mtrr_state *state)
118 for(i = 0; i < MTRR_COUNT; i++) {
119 printk_debug("var mtrr %d: %08x%08x mask: %08x%08x\n",
121 state->mtrrs[i].base.hi, state->mtrrs[i].base.lo,
122 state->mtrrs[i].mask.hi, state->mtrrs[i].mask.lo);
124 printk_debug("top_mem: %08x%08x\n",
125 state->top_mem.hi, state->top_mem.lo);
126 printk_debug("top_mem2: %08x%08x\n",
127 state->top_mem2.hi, state->top_mem2.lo);
128 printk_debug("def_type: %08x%08x\n",
129 state->def_type.hi, state->def_type.lo);
133 static void set_init_ecc_mtrrs(void)
139 /* First clear all of the msrs to be safe */
140 for(i = 0; i < MTRR_COUNT; i++) {
142 zero.lo = zero.hi = 0;
143 wrmsr(MTRRphysBase_MSR(i), zero);
144 wrmsr(MTRRphysMask_MSR(i), zero);
147 /* Write back cache the first 1MB */
149 msr.lo = 0x00000000 | MTRR_TYPE_WRBACK;
150 wrmsr(MTRRphysBase_MSR(0), msr);
152 msr.lo = ~((CONFIG_LB_MEM_TOPK << 10) - 1) | 0x800;
153 wrmsr(MTRRphysMask_MSR(0), msr);
155 /* Set the default type to write combining */
157 msr.lo = 0xc00 | MTRR_TYPE_WRCOMB;
158 wrmsr(MTRRdefType_MSR, msr);
160 /* Set TOP_MEM to 4G */
169 static void init_ecc_memory(void)
171 unsigned long startk, begink, endk;
173 struct mtrr_state mtrr_state;
174 device_t f1_dev, f2_dev, f3_dev;
175 int cpu_index, cpu_id, node_id;
176 int enable_scrubbing;
178 cpu_id = this_processors_id();
179 cpu_index = processor_index(cpu_id);
180 /* For now there is a 1-1 mapping between node_id and cpu_id */
183 f1_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 1));
185 die("Cannot find cpu function 1\n");
187 f2_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 2));
189 die("Cannot find cpu function 2\n");
191 f3_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 3));
193 die("Cannot find cpu function 3\n");
196 /* See if we scrubbing should be enabled */
197 enable_scrubbing = 1;
198 get_option(&enable_scrubbing, "hw_scrubber");
200 /* Enable cache scrubbing at the lowest possible rate */
201 if (enable_scrubbing) {
202 pci_write_config32(f3_dev, SCRUB_CONTROL,
203 (SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_NONE << 0));
205 pci_write_config32(f3_dev, SCRUB_CONTROL,
206 (SCRUB_NONE << 16) | (SCRUB_NONE << 8) | (SCRUB_NONE << 0));
207 printk_debug("Scrubbing Disabled\n");
211 /* If ecc support is not enabled don't touch memory */
212 dcl = pci_read_config32(f2_dev, DRAM_CONFIG_LOW);
213 if (!(dcl & DCL_DimmEccEn)) {
217 startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
218 endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;
220 /* Don't start too early */
222 if (begink < CONFIG_LB_MEM_TOPK) {
223 begink = CONFIG_LB_MEM_TOPK;
225 printk_debug("Clearing memory %uK - %uK: ", startk, endk);
227 /* Save the normal state */
228 save_mtrr_state(&mtrr_state);
230 /* Switch to the init ecc state */
231 set_init_ecc_mtrrs();
234 /* Walk through 2M chunks and zero them */
235 for(basek = begink; basek < endk; basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1))) {
236 unsigned long limitk;
240 /* Report every 64M */
241 if ((basek % (64*1024)) == 0) {
242 /* Restore the normal state */
243 map_2M_page(cpu_index, 0);
244 restore_mtrr_state(&mtrr_state);
247 /* Print a status message */
248 printk_debug("%c", (basek >= TOLM_KB)?'+':'-');
250 /* Return to the initialization state */
251 set_init_ecc_mtrrs();
254 limitk = (basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1);
258 size = (limitk - basek) << 10;
259 addr = map_2M_page(cpu_index, basek >> 11);
260 addr = (void *)(((uint32_t)addr) | ((basek & 0x7ff) << 10));
261 if (addr == MAPPING_ERROR) {
265 /* clear memory 2M (limitk - basek) */
273 : "a" (0), "D" (addr), "c" (size)
276 /* Restore the normal state */
277 map_2M_page(cpu_index, 0);
278 restore_mtrr_state(&mtrr_state);
281 /* Set the scrub base address registers */
282 pci_write_config32(f3_dev, SCRUB_ADDR_LOW, startk << 10);
283 pci_write_config32(f3_dev, SCRUB_ADDR_HIGH, startk >> 22);
285 /* Enable the scrubber? */
286 if (enable_scrubbing) {
287 /* Enable scrubbing at the lowest possible rate */
288 pci_write_config32(f3_dev, SCRUB_CONTROL,
289 (SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_84ms << 0));
292 printk_debug(" done\n");
295 void k8_cpufixup(struct mem_range *mem)
297 unsigned long mmio_basek, tomk;
303 /* Except for the PCI MMIO hold just before 4GB there are no
304 * significant holes in the address space, so just account
305 * for those two and move on.
307 mmio_basek = tomk = 0;
308 for(i = 0; mem[i].sizek; i++) {
310 topk = mem[i].basek + mem[i].sizek;
314 if ((topk < 4*1024*1024) && (mmio_basek < topk)) {
318 if (mmio_basek > tomk) {
321 /* Round mmio_basek down to the nearst size that will fit in TOP_MEM */
322 mmio_basek = mmio_basek & ~TOP_MEM_MASK_KB;
323 /* Round tomk up to the next greater size that will fit in TOP_MEM */
324 tomk = (tomk + TOP_MEM_MASK_KB) & ~TOP_MEM_MASK_KB;
327 msr.hi = mmio_basek >> 22;
328 msr.lo = mmio_basek << 10;
334 wrmsr(TOP_MEM2, msr);
336 /* zero the IORR's before we enable to prevent
337 * undefined side effects.
340 for(i = IORR_FIRST; i <= IORR_LAST; i++) {
344 msr = rdmsr(SYSCFG_MSR);
345 msr.lo |= SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_TOM2En;
346 wrmsr(SYSCFG_MSR, msr);
348 /* zero the machine check error status registers */
352 wrmsr(MCI_STATUS + (i*4),msr);
355 if (is_cpu_pre_c0()) {
357 msr = rdmsr(HWCR_MSR);
359 wrmsr(HWCR_MSR, msr);
362 msr = rdmsr_amd(BU_CFG_MSR);
363 msr.hi |= (1 << (45 - 32));
364 wrmsr_amd(BU_CFG_MSR, msr);
367 msr = rdmsr_amd(DC_CFG_MSR);
369 wrmsr_amd(DC_CFG_MSR, msr);
372 /* I can't touch this msr on early buggy cpus */
373 if (!is_cpu_pre_b3()) {
376 msr = rdmsr(NB_CFG_MSR);
379 if (!is_cpu_pre_c0()) {
380 /* Erratum 86 Disable data masking on C0 and
381 * later processor revs.
382 * FIXME this is only needed if ECC is enabled.
384 msr.hi |= 1 << (36 - 32);
386 wrmsr(NB_CFG_MSR, msr);
390 if (!is_cpu_pre_c0()) {
391 msr = rdmsr_amd(DC_CFG_MSR);
393 wrmsr_amd(DC_CFG_MSR, msr);
397 msr = rdmsr_amd(IC_CFG_MSR);
399 wrmsr_amd(IC_CFG_MSR, msr);
401 /* Erratum 91 prefetch miss is handled in the kernel */
405 /* Is this a bad location? In particular can another node prefecth
406 * data from this node before we have initialized it?
/* Chip-pass hook for the K8 CPU "chip": dispatches on the configuration
 * pass being run (pre-console, pre-PCI, ...).
 * NOTE(review): this definition is fragmentary in this capture — the
 * switch framing, case bodies, default case and closing braces are
 * missing. `conf` is fetched but its use is not visible; reconstruct
 * from project history before relying on this function.
 */
412 void k8_enable(struct chip *chip, enum chip_pass pass)
415 struct cpu_k8_config *conf = (struct cpu_k8_config *)chip->chip_info;
418 case CONF_PASS_PRE_CONSOLE:
420 case CONF_PASS_PRE_PCI:
429 struct chip_control cpu_k8_control = {