/* Needed so the AMD K8 runs correctly. */
/* this should be done by Eric
 * 2004.11 yhlu add d0 e0 support
 * 2004.12 yhlu add dual core support
 * 2005.02 yhlu add e0 memory hole support
 * 2005.08 yhlu add microcode support
 */
#include <console/console.h>
#include <cpu/x86/msr.h>
#include <cpu/amd/mtrr.h>
#include <device/device.h>
#include <device/pci.h>
#include <cpu/x86/pae.h>
#include <pc80/mc146818rtc.h>
#include <cpu/x86/lapic.h>

#include "../../../northbridge/amd/amdk8/amdk8.h"
/* Pull in the is_cpu_pre_*() revision helpers directly. */
#include "../../../northbridge/amd/amdk8/cpu_rev.c"

#include <cpu/cpu.h>
#include <cpu/amd/microcode.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/mem.h>
#include <cpu/amd/dualcore.h>

#include "model_fxx_msr.h"
#define MCI_STATUS 0x401 /* MC0_STATUS; the K8 has 5 MCA banks, 4 MSRs apart */
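
/* rdmsr_amd()/wrmsr_amd() access AMD's password-protected implementation
 * MSRs (BU_CFG, DC_CFG, IC_CFG, LS_CFG, ...). The K8 only honors rdmsr/wrmsr
 * on these registers while EDI holds the "password" 0x9c5a203a, which the
 * "D" constraints below supply.
 */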
static inline msr_t rdmsr_amd(unsigned index)
{
        msr_t result;
        __asm__ __volatile__ (
                "rdmsr"
                : "=a" (result.lo), "=d" (result.hi)
                : "c" (index), "D" (0x9c5a203a)
                );
        return result;
}

static inline void wrmsr_amd(unsigned index, msr_t msr)
{
        __asm__ __volatile__ (
                "wrmsr"
                : /* No outputs */
                : "c" (index), "a" (msr.lo), "d" (msr.hi), "D" (0x9c5a203a)
                );
}
#define MTRR_COUNT 8
#define ZERO_CHUNK_KB 0x800UL /* 2M */
#define TOLM_KB 0x400000UL /* 4G */
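
/* ZERO_CHUNK_KB matches the 2M window that map_2M_page() maps at a time,
 * so each iteration of the clearing loop below covers exactly one mapping.
 * TOLM_KB is only used to pick the '+'/'-' progress character for memory
 * above/below 4G.
 */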
struct mtrr {
        msr_t base;
        msr_t mask;
};

struct mtrr_state {
        struct mtrr mtrrs[MTRR_COUNT];
        msr_t top_mem, top_mem2;
        msr_t def_type;
};
static void save_mtrr_state(struct mtrr_state *state)
{
        int i;
        for (i = 0; i < MTRR_COUNT; i++) {
                state->mtrrs[i].base = rdmsr(MTRRphysBase_MSR(i));
                state->mtrrs[i].mask = rdmsr(MTRRphysMask_MSR(i));
        }
        state->top_mem  = rdmsr(TOP_MEM);
        state->top_mem2 = rdmsr(TOP_MEM2);
        state->def_type = rdmsr(MTRRdefType_MSR);
}
static void restore_mtrr_state(struct mtrr_state *state)
{
        int i;
        disable_cache();

        for (i = 0; i < MTRR_COUNT; i++) {
                wrmsr(MTRRphysBase_MSR(i), state->mtrrs[i].base);
                wrmsr(MTRRphysMask_MSR(i), state->mtrrs[i].mask);
        }
        wrmsr(TOP_MEM,         state->top_mem);
        wrmsr(TOP_MEM2,        state->top_mem2);
        wrmsr(MTRRdefType_MSR, state->def_type);

        enable_cache();
}
static void print_mtrr_state(struct mtrr_state *state)
{
        int i;
        for (i = 0; i < MTRR_COUNT; i++) {
                printk_debug("var mtrr %d: %08x%08x mask: %08x%08x\n",
                        i,
                        state->mtrrs[i].base.hi, state->mtrrs[i].base.lo,
                        state->mtrrs[i].mask.hi, state->mtrrs[i].mask.lo);
        }
        printk_debug("top_mem:  %08x%08x\n",
                state->top_mem.hi, state->top_mem.lo);
        printk_debug("top_mem2: %08x%08x\n",
                state->top_mem2.hi, state->top_mem2.lo);
        printk_debug("def_type: %08x%08x\n",
                state->def_type.hi, state->def_type.lo);
}
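
/* Build a temporary MTRR setup for the ECC clear below: write-back caching
 * over low memory (where our code and stack live), a default type of
 * write-combining so the zeroing streams to DRAM without filling the cache,
 * and TOP_MEM raised to 4G so everything below 4G is treated as RAM.
 */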
static void set_init_ecc_mtrrs(void)
{
        msr_t msr;
        int i;
        disable_cache();

        /* First clear all of the msrs to be safe */
        for (i = 0; i < MTRR_COUNT; i++) {
                msr_t zero;
                zero.lo = zero.hi = 0;
                wrmsr(MTRRphysBase_MSR(i), zero);
                wrmsr(MTRRphysMask_MSR(i), zero);
        }

        /* Write back cache the first CONFIG_LB_MEM_TOPK of RAM */
        msr.hi = 0x00000000;
        msr.lo = 0x00000000 | MTRR_TYPE_WRBACK;
        wrmsr(MTRRphysBase_MSR(0), msr);
        msr.hi = 0x000000ff;
        msr.lo = ~((CONFIG_LB_MEM_TOPK << 10) - 1) | 0x800; /* 0x800 == mask valid */
        wrmsr(MTRRphysMask_MSR(0), msr);

        /* Set the default type to write combining */
        msr.hi = 0x00000000;
        msr.lo = 0xc00 | MTRR_TYPE_WRCOMB; /* 0xc00 == MTRRs + fixed ranges enabled */
        wrmsr(MTRRdefType_MSR, msr);

        /* Set TOP_MEM to 4G */
        msr.hi = 0x00000001;
        msr.lo = 0x00000000;
        wrmsr(TOP_MEM, msr);

        enable_cache();
}
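
/* Freshly powered-up ECC DIMMs hold random data with invalid check bits,
 * so every location must be written once before it can be safely read.
 * init_ecc_memory() clears this node's DRAM range (core 0 only), then
 * points the hardware scrubber at it.
 */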
static void init_ecc_memory(unsigned node_id)
{
        unsigned long startk, begink, endk;
        unsigned long basek;
        struct mtrr_state mtrr_state;
        device_t f1_dev, f2_dev, f3_dev;
        int enable_scrubbing;
        uint32_t dcl;
        /* Node N's northbridge is PCI device 0:18+N: function 1 holds the
         * DRAM address map, function 2 the DRAM controller, function 3
         * miscellaneous control (including the scrubber).
         */
        f1_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 1));
        if (!f1_dev) {
                die("Cannot find cpu function 1\n");
        }
        f2_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 2));
        if (!f2_dev) {
                die("Cannot find cpu function 2\n");
        }
        f3_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 3));
        if (!f3_dev) {
                die("Cannot find cpu function 3\n");
        }
        /* See if scrubbing should be enabled (CMOS option, default on) */
        enable_scrubbing = 1;
        get_option(&enable_scrubbing, "hw_scrubber");
        /* Enable cache scrubbing at the lowest possible rate.
         * SCRUB_CONTROL fields: [20:16] L2, [12:8] dcache, [4:0] DRAM;
         * DRAM scrubbing stays off until memory has been cleared below.
         */
        if (enable_scrubbing) {
                pci_write_config32(f3_dev, SCRUB_CONTROL,
                        (SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_NONE << 0));
        } else {
                pci_write_config32(f3_dev, SCRUB_CONTROL,
                        (SCRUB_NONE << 16) | (SCRUB_NONE << 8) | (SCRUB_NONE << 0));
                printk_debug("Scrubbing Disabled\n");
        }
        /* If ecc support is not enabled don't touch memory */
        dcl = pci_read_config32(f2_dev, DRAM_CONFIG_LOW);
        if (!(dcl & DCL_DimmEccEn)) {
                printk_debug("ECC Disabled\n");
                return;
        }
        startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
        endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;
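        /* 0x40/0x44 + 8*node are the F1 DRAM Base/Limit registers: bits
         * [31:16] hold address bits [39:24], so masking and shifting right
         * by 2 yields kilobytes. The limit field names the base of the last
         * 16M block of the range, hence the +0x4000 (16M in KB).
         */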
        /* Don't start too early; the bottom CONFIG_LB_MEM_TOPK is in use */
        begink = startk;
        if (begink < CONFIG_LB_MEM_TOPK) {
                begink = CONFIG_LB_MEM_TOPK;
        }
        printk_debug("Clearing memory %luK - %luK: ", begink, endk);
        /* Save the normal state */
        save_mtrr_state(&mtrr_state);

        /* Switch to the init ecc state; no interrupts while the
         * MTRRs are nonstandard */
        set_init_ecc_mtrrs();
        disable_lapic();
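
        /* While clearing, caching is in the special state above and the
         * lapic is off; every 64M the loop briefly drops back to the normal
         * state so a progress character can be printed safely.
         */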
        /* Walk through 2M chunks and zero them */
        for (basek = begink; basek < endk;
                basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1)))
        {
                unsigned long limitk;
                unsigned long size;
                void *addr;

                /* Report every 64M */
                if ((basek % (64*1024)) == 0) {
                        /* Restore the normal state */
                        map_2M_page(0);
                        restore_mtrr_state(&mtrr_state);
                        enable_lapic();

                        /* Print a status message */
                        printk_debug("%c", (basek >= TOLM_KB) ? '+' : '-');

                        /* Return to the initialization state */
                        set_init_ecc_mtrrs();
                        disable_lapic();
                }

                /* The first chunk may be unaligned; clamp the last to endk */
                limitk = (basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1);
                if (limitk > endk) {
                        limitk = endk;
                }
                size = (limitk - basek) << 10;
                addr = map_2M_page(basek >> 11);
                if (addr == MAPPING_ERROR) {
                        printk_err("Cannot map page: %lx\n", basek >> 11);
                        continue;
                }

                /* clear memory 2M (limitk - basek); the low bits of basek
                 * give the offset into the mapped 2M window */
                addr = (void *)(((uint32_t)addr) | ((basek & 0x7ff) << 10));
                clear_memory(addr, size);
        }
        /* Restore the normal state */
        map_2M_page(0);
        restore_mtrr_state(&mtrr_state);
        enable_lapic();
        /* Set the scrub base address registers */
        pci_write_config32(f3_dev, SCRUB_ADDR_LOW, startk << 10);
        pci_write_config32(f3_dev, SCRUB_ADDR_HIGH, startk >> 22);
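        /* startk is in KB: <<10 gives byte address bits [31:0] for the low
         * register, >>22 gives bits [39:32] for the high one, so the
         * scrubber starts from this node's DRAM base.
         */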
        /* Enable the scrubber? */
        if (enable_scrubbing) {
                /* Enable scrubbing at the lowest possible rate */
                pci_write_config32(f3_dev, SCRUB_CONTROL,
                        (SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_84ms << 0));
        }

        printk_debug(" done\n");
}
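
/* Workarounds for published K8 errata (see AMD's "Revision Guide for AMD
 * Athlon 64 and AMD Opteron Processors", pub #25759). Each one sets a
 * workaround bit in an implementation MSR; the revision checks come from
 * cpu_rev.c.
 */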
static inline void k8_errata(void)
{
        msr_t msr;
        if (is_cpu_pre_c0()) {
                /* Erratum 63 ... */
                msr = rdmsr(HWCR_MSR);
                msr.lo |= (1 << 6);
                wrmsr(HWCR_MSR, msr);

                /* Erratum 69 ... */
                msr = rdmsr_amd(BU_CFG_MSR);
                msr.hi |= (1 << (45 - 32));
                wrmsr_amd(BU_CFG_MSR, msr);

                /* Erratum 81 ... */
                msr = rdmsr_amd(DC_CFG_MSR);
                msr.lo |= (1 << 10);
                wrmsr_amd(DC_CFG_MSR, msr);
        }
        /* I can't touch this msr on early buggy cpus */
        if (!is_cpu_pre_b3()) {
                /* Erratum 89 ... */
                msr = rdmsr(NB_CFG_MSR);
                msr.lo |= 1 << 3;

                if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
                        /* D0 and later don't need it */
                        /* Erratum 86 Disable data masking on C0 and
                         * later processor revs.
                         * FIXME this is only needed if ECC is enabled.
                         */
                        msr.hi |= 1 << (36 - 32);
                }
                wrmsr(NB_CFG_MSR, msr);
        }
        /* Erratum 97 ... */
        if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
                msr = rdmsr_amd(DC_CFG_MSR);
                msr.lo |= 1 << 3;
                wrmsr_amd(DC_CFG_MSR, msr);
        }

        /* Erratum 94 ... */
        if (is_cpu_pre_d0()) {
                msr = rdmsr_amd(IC_CFG_MSR);
                msr.lo |= 1 << 11;
                wrmsr_amd(IC_CFG_MSR, msr);
        }
        /* Erratum 91 prefetch miss is handled in the kernel */

        /* Erratum 106 ... */
        msr = rdmsr_amd(LS_CFG_MSR);
        msr.lo |= 1 << 25;
        wrmsr_amd(LS_CFG_MSR, msr);

        /* Erratum 107 ... */
        msr = rdmsr_amd(BU_CFG_MSR);
        msr.hi |= 1 << (43 - 32);
        wrmsr_amd(BU_CFG_MSR, msr);
        if (is_cpu_pre_e0() && !is_cpu_pre_d0()) {
                /* Erratum 110 (D0 revisions) ... */
                msr = rdmsr_amd(CPU_ID_HYPER_EXT_FEATURES);
                msr.hi |= 1;
                wrmsr_amd(CPU_ID_HYPER_EXT_FEATURES, msr);
        }
        if (!is_cpu_pre_e0()) {
                /* Erratum 110 ... */
                msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
                msr.hi |= 1;
                wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);

                /* Erratum 113 ... */
                msr = rdmsr_amd(BU_CFG_MSR);
                msr.hi |= (1 << (48 - 32));
                wrmsr_amd(BU_CFG_MSR, msr);
        }
        /* Erratum 122 */
        if (!is_cpu_pre_c0()) {
                msr = rdmsr(HWCR_MSR);
                msr.lo |= 1 << 6;
                wrmsr(HWCR_MSR, msr);
        }
        /* Erratum 123? dual core deadlock? */
        msr = rdmsr(NB_CFG_MSR);
        msr.lo |= 1 << 1; /* assumed workaround bit, as in contemporary
                           * trees; verify against the revision guide */
        wrmsr(NB_CFG_MSR, msr);
}
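
/* Per-cpu init: bring caching and the MTRRs up, load microcode, clear the
 * machine check banks, apply the errata workarounds, start the lapic, and
 * let core 0 of each node initialize that node's ECC memory.
 */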
extern void model_fxx_update_microcode(unsigned cpu_deviceid);

void model_fxx_init(device_t cpu)
{
        unsigned long i;
        msr_t msr;
        struct node_core_id id;
        unsigned equivalent_processor_rev_id;

        /* Turn on caching if we haven't already */
        x86_enable_cache();
        amd_setup_mtrrs();
        x86_mtrr_check();

        /* Update the microcode */
        model_fxx_update_microcode(cpu->device);

        disable_cache();

        /* zero the machine check error status registers */
        msr.lo = 0;
        msr.hi = 0;
        for (i = 0; i < 5; i++) {
                wrmsr(MCI_STATUS + (i*4), msr);
        }

        k8_errata();

        enable_cache();
        /* Enable the local cpu apics */
        setup_lapic();

        /* Find our node and core */
        id = get_node_core_id();

        /* Is this a bad location? In particular can another node prefetch
         * data from this node before we have initialized it?
         */
        if (id.coreid == 0) {
                init_ecc_memory(id.nodeid); /* only do it for core 0 */
        }

        /* Deal with sibling cpus */
        amd_sibling_init(cpu, id);
}
static struct device_operations cpu_dev_ops = {
        .init = model_fxx_init,
};
static struct cpu_device_id cpu_table[] = {
        { X86_VENDOR_AMD, 0xf50 },   /* B3 */
        { X86_VENDOR_AMD, 0xf51 },   /* SH7-B3 */
        { X86_VENDOR_AMD, 0xf58 },   /* SH7-C0 */
        { X86_VENDOR_AMD, 0xf48 },

        { X86_VENDOR_AMD, 0xf5A },   /* SH7-CG */
        { X86_VENDOR_AMD, 0xf4A },
        { X86_VENDOR_AMD, 0xf7A },
        { X86_VENDOR_AMD, 0xfc0 },   /* DH7-CG */
        { X86_VENDOR_AMD, 0xfe0 },
        { X86_VENDOR_AMD, 0xff0 },
        { X86_VENDOR_AMD, 0xf82 },   /* CH7-CG */
        { X86_VENDOR_AMD, 0xfb2 },

        { X86_VENDOR_AMD, 0x10f50 }, /* SH7-D0 */
        { X86_VENDOR_AMD, 0x10f40 },
        { X86_VENDOR_AMD, 0x10f70 },
        { X86_VENDOR_AMD, 0x10fc0 }, /* DH7-D0 */
        { X86_VENDOR_AMD, 0x10ff0 },
        { X86_VENDOR_AMD, 0x10f80 }, /* CH7-D0 */
        { X86_VENDOR_AMD, 0x10fb0 },

        { X86_VENDOR_AMD, 0x20f50 }, /* SH8-E0 */
        { X86_VENDOR_AMD, 0x20f40 },
        { X86_VENDOR_AMD, 0x20f70 },
        { X86_VENDOR_AMD, 0x20fc0 }, /* DH8-E0 */ /* DH-E3 */
        { X86_VENDOR_AMD, 0x20ff0 },
        { X86_VENDOR_AMD, 0x20f10 }, /* JH8-E1 */
        { X86_VENDOR_AMD, 0x20f30 },
        { X86_VENDOR_AMD, 0x20f51 }, /* SH-E4 */
        { X86_VENDOR_AMD, 0x20f71 },
        { X86_VENDOR_AMD, 0x20f42 }, /* SH-E5 */
        { X86_VENDOR_AMD, 0x20ff2 }, /* DH-E6 */
        { X86_VENDOR_AMD, 0x20fc2 },
        { X86_VENDOR_AMD, 0x20f12 }, /* JH-E6 */
        { X86_VENDOR_AMD, 0x20f32 },

        { 0, 0 },
};
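
/* The IDs above are raw CPUID EAX values (family 0xF): the extended-model
 * nibble (0x10000 for D, 0x20000 for E revisions) and the low stepping
 * nibble pick out specific silicon revisions.
 */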
static struct cpu_driver model_fxx __cpu_driver = {
        .ops      = &cpu_dev_ops,
        .id_table = cpu_table,
};
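
/* __cpu_driver (from <cpu/cpu.h>) places this struct in a dedicated section
 * that the generic CPU code walks at boot, matching each processor's CPUID
 * against id_table and calling .init on a hit.
 */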