/* Needed so the AMD K8 runs correctly. */
/* This should be done by Eric.
 * 2004.11 yhlu add d0 e0 support
 * 2004.12 yhlu add dual core support
 * 2005.02 yhlu add e0 memory hole support
 */
#include <console/console.h>
#include <cpu/x86/msr.h>
#include <cpu/amd/mtrr.h>
#include <device/device.h>
#include <device/pci.h>
#include <cpu/x86/pae.h>
#include <pc80/mc146818rtc.h>
#include <cpu/x86/lapic.h>

#include "../../../northbridge/amd/amdk8/amdk8.h"
#include "../../../northbridge/amd/amdk8/cpu_rev.c"

#include <cpu/cpu.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/mem.h>
#include <cpu/amd/dualcore.h>

#include "model_fxx_msr.h"

#define MCI_STATUS 0x401
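
/* rdmsr_amd()/wrmsr_amd() below differ from the plain rdmsr()/wrmsr()
 * wrappers only in loading 0x9c5a203a into EDI first: the K8 requires
 * this pass code for accesses to its reserved configuration MSRs
 * (BU_CFG, DC_CFG, IC_CFG, LS_CFG, ...) used by k8_errata() below.
 */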
static inline msr_t rdmsr_amd(unsigned index)
{
	msr_t result;
	__asm__ __volatile__ (
		"rdmsr"
		: "=a" (result.lo), "=d" (result.hi)
		: "c" (index), "D" (0x9c5a203a)
		);
	return result;
}

static inline void wrmsr_amd(unsigned index, msr_t msr)
{
	__asm__ __volatile__ (
		"wrmsr"
		:
		: "c" (index), "a" (msr.lo), "d" (msr.hi), "D" (0x9c5a203a)
		);
}

#define MTRR_COUNT 8
#define ZERO_CHUNK_KB 0x800UL /* 2M */
#define TOLM_KB 0x400000UL /* 4G */

struct mtrr {
	msr_t base;
	msr_t mask;
};

struct mtrr_state {
	struct mtrr mtrrs[MTRR_COUNT];
	msr_t top_mem, top_mem2;
	msr_t def_type;
};

static void save_mtrr_state(struct mtrr_state *state)
{
	int i;
	for(i = 0; i < MTRR_COUNT; i++) {
		state->mtrrs[i].base = rdmsr(MTRRphysBase_MSR(i));
		state->mtrrs[i].mask = rdmsr(MTRRphysMask_MSR(i));
	}
	state->top_mem  = rdmsr(TOP_MEM);
	state->top_mem2 = rdmsr(TOP_MEM2);
	state->def_type = rdmsr(MTRRdefType_MSR);
}
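
/* Rewriting the variable MTRRs is only safe while the cache is disabled,
 * so restore_mtrr_state() brackets its wrmsr sequence with disable_cache()
 * and enable_cache() (CR0.CD toggling plus a wbinvd flush, per
 * cpu/x86/cache.h).
 */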
static void restore_mtrr_state(struct mtrr_state *state)
{
	int i;
	disable_cache();

	for(i = 0; i < MTRR_COUNT; i++) {
		wrmsr(MTRRphysBase_MSR(i), state->mtrrs[i].base);
		wrmsr(MTRRphysMask_MSR(i), state->mtrrs[i].mask);
	}
	wrmsr(TOP_MEM,         state->top_mem);
	wrmsr(TOP_MEM2,        state->top_mem2);
	wrmsr(MTRRdefType_MSR, state->def_type);

	enable_cache();
}

static void print_mtrr_state(struct mtrr_state *state)
{
	int i;
	for(i = 0; i < MTRR_COUNT; i++) {
		printk_debug("var mtrr %d: %08x%08x mask: %08x%08x\n",
			i,
			state->mtrrs[i].base.hi, state->mtrrs[i].base.lo,
			state->mtrrs[i].mask.hi, state->mtrrs[i].mask.lo);
	}
	printk_debug("top_mem:  %08x%08x\n",
		state->top_mem.hi, state->top_mem.lo);
	printk_debug("top_mem2: %08x%08x\n",
		state->top_mem2.hi, state->top_mem2.lo);
	printk_debug("def_type: %08x%08x\n",
		state->def_type.hi, state->def_type.lo);
}
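
/* For the ECC init pass the normal MTRR setup is swapped for a minimal
 * one: write-back for the low memory coreboot itself runs in, and a
 * write-combining default for everything else.  WC lets the zero fill
 * stream to DRAM as combined bursts instead of pulling each line into
 * the cache first, which keeps clearing gigabytes of memory tolerable.
 */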
static void set_init_ecc_mtrrs(void)
{
	msr_t msr;
	int i;
	disable_cache();

	/* First clear all of the msrs to be safe */
	for(i = 0; i < MTRR_COUNT; i++) {
		msr_t zero;
		zero.lo = zero.hi = 0;
		wrmsr(MTRRphysBase_MSR(i), zero);
		wrmsr(MTRRphysMask_MSR(i), zero);
	}

	/* Write back cache the first 1MB */
	msr.hi = 0x00000000;
	msr.lo = 0x00000000 | MTRR_TYPE_WRBACK;
	wrmsr(MTRRphysBase_MSR(0), msr);
	/* Mask covers up to CONFIG_LB_MEM_TOPK; 0x800 is the valid bit */
	msr.hi = 0x000000ff;
	msr.lo = ~((CONFIG_LB_MEM_TOPK << 10) - 1) | 0x800;
	wrmsr(MTRRphysMask_MSR(0), msr);

	/* Set the default type to write combining */
	msr.hi = 0x00000000;
	msr.lo = 0xc00 | MTRR_TYPE_WRCOMB;
	wrmsr(MTRRdefType_MSR, msr);

	/* Set TOP_MEM to 4G */
	msr.hi = 0x00000001;
	msr.lo = 0x00000000;
	wrmsr(TOP_MEM, msr);

	enable_cache();
}
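
/* A DRAM location's ECC check bits are not valid until the location has
 * been written at least once.  When ECC is enabled, init_ecc_memory()
 * therefore zeroes this node's entire memory range before pointing the
 * hardware scrubber at it; otherwise the scrubber and normal reads would
 * report spurious ECC errors.
 */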
static void init_ecc_memory(unsigned node_id)
{
	unsigned long startk, begink, endk;
	unsigned long basek;
	struct mtrr_state mtrr_state;
	device_t f1_dev, f2_dev, f3_dev;
	int enable_scrubbing;
	uint32_t dcl;

	f1_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 1));
	if (!f1_dev) {
		die("Cannot find cpu function 1\n");
	}
	f2_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 2));
	if (!f2_dev) {
		die("Cannot find cpu function 2\n");
	}
	f3_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 3));
	if (!f3_dev) {
		die("Cannot find cpu function 3\n");
	}
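
	/* Each K8 node appears on PCI bus 0 as device 0x18 + node_id:
	 * function 1 is the address map, function 2 the DRAM controller,
	 * and function 3 miscellaneous control, including the scrubber.
	 */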
	/* See if scrubbing should be enabled */
	enable_scrubbing = 1;
	get_option(&enable_scrubbing, "hw_scrubber");

	/* Enable cache scrubbing at the lowest possible rate */
	if (enable_scrubbing) {
		pci_write_config32(f3_dev, SCRUB_CONTROL,
			(SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_NONE << 0));
	} else {
		pci_write_config32(f3_dev, SCRUB_CONTROL,
			(SCRUB_NONE << 16) | (SCRUB_NONE << 8) | (SCRUB_NONE << 0));
		printk_debug("Scrubbing Disabled\n");
	}

	/* If ECC support is not enabled, don't touch memory */
	dcl = pci_read_config32(f2_dev, DRAM_CONFIG_LOW);
	if (!(dcl & DCL_DimmEccEn)) {
		printk_debug("ECC Disabled\n");
		return;
	}
	/* Function 1 holds the node's DRAM base/limit: bits [31:16] of the
	 * register are address bits [39:24], so masking with 0xffff0000 and
	 * shifting right by 2 yields the address in KB.  The limit register
	 * is 16MB granular, hence the + 0x4000 (16M in KB) to make endk
	 * exclusive.
	 */
	startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
	endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;

	/* Don't start too early */
	begink = startk;
	if (begink < CONFIG_LB_MEM_TOPK) {
		begink = CONFIG_LB_MEM_TOPK;
	}
	printk_debug("Clearing memory %uK - %uK: ", begink, endk);

	/* Save the normal state */
	save_mtrr_state(&mtrr_state);

	/* Switch to the init ecc state */
	set_init_ecc_mtrrs();
	disable_lapic();
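
	/* The zeroing loop runs in 32-bit protected mode, so memory above
	 * 4G is out of reach of a flat mapping.  map_2M_page() (from
	 * cpu/x86/pae) maps one 2M window at a time via PAE page tables,
	 * which is why the walk advances in 2M-aligned chunks.
	 */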
	/* Walk through 2M chunks and zero them */
	for(basek = begink; basek < endk;
		basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1)))
	{
		unsigned long limitk;
		unsigned long size;
		void *addr;

		/* Report every 64M */
		if ((basek % (64*1024)) == 0) {
			/* Restore the normal state */
			map_2M_page(0);
			restore_mtrr_state(&mtrr_state);
			enable_lapic();

			/* Print a status message */
			printk_debug("%c", (basek >= TOLM_KB)?'+':'-');

			/* Return to the initialization state */
			set_init_ecc_mtrrs();
			disable_lapic();
		}

		limitk = (basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1);
		if (limitk > endk) {
			limitk = endk;
		}
		size = (limitk - basek) << 10;
		addr = map_2M_page(basek >> 11);
		if (addr == MAPPING_ERROR) {
			printk_err("Cannot map page: %x\n", basek >> 11);
			continue;
		}

		/* clear memory 2M (limitk - basek); keep basek's sub-2M
		 * offset in case begink was not 2M aligned */
		addr = (void *)(((uint32_t)addr) | ((basek & 0x7ff) << 10));
		clear_memory(addr, size);
	}
	/* Restore the normal state */
	map_2M_page(0);
	restore_mtrr_state(&mtrr_state);
	enable_lapic();
	/* Set the scrub base address registers */
	pci_write_config32(f3_dev, SCRUB_ADDR_LOW,  startk << 10);
	pci_write_config32(f3_dev, SCRUB_ADDR_HIGH, startk >> 22);

	/* Enable the scrubber? */
	if (enable_scrubbing) {
		/* Enable scrubbing at the lowest possible rate */
		pci_write_config32(f3_dev, SCRUB_CONTROL,
			(SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_84ms << 0));
	}

	printk_debug(" done\n");
}
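
/* Apply the revision-dependent workarounds from AMD's K8 revision guide.
 * Each one sets a workaround bit in a (mostly pass-code protected)
 * configuration MSR; the is_cpu_pre_*() helpers from cpu_rev.c decide
 * which fixes a given silicon revision still needs.
 */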
static inline void k8_errata(void)
{
	msr_t msr;
	if (is_cpu_pre_c0()) {
		/* Erratum 63 ... */
		msr = rdmsr(HWCR_MSR);
		msr.lo |= (1 << 6);
		wrmsr(HWCR_MSR, msr);

		/* Erratum 69 ... */
		msr = rdmsr_amd(BU_CFG_MSR);
		msr.hi |= (1 << (45 - 32));
		wrmsr_amd(BU_CFG_MSR, msr);

		/* Erratum 81 ... */
		msr = rdmsr_amd(DC_CFG_MSR);
		msr.lo |= (1 << 10);
		wrmsr_amd(DC_CFG_MSR, msr);
	}

	/* I can't touch this msr on early buggy cpus */
	if (!is_cpu_pre_b3()) {
		/* Erratum 89 ... */
		msr = rdmsr(NB_CFG_MSR);
		msr.lo |= 1 << 3;

		if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
			/* D0 and later don't need it.
			 * Erratum 86: Disable data masking on C0 and
			 * later processor revs.
			 * FIXME: this is only needed if ECC is enabled.
			 */
			msr.hi |= 1 << (36 - 32);
		}
		wrmsr(NB_CFG_MSR, msr);
	}

	/* Erratum 97 ... */
	if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
		msr = rdmsr_amd(DC_CFG_MSR);
		msr.lo |= 1 << 3;
		wrmsr_amd(DC_CFG_MSR, msr);
	}

	/* Erratum 94 ... */
	if (is_cpu_pre_d0()) {
		msr = rdmsr_amd(IC_CFG_MSR);
		msr.lo |= 1 << 11;
		wrmsr_amd(IC_CFG_MSR, msr);
	}

	/* Erratum 91 prefetch miss is handled in the kernel */

	/* Erratum 106 ... */
	msr = rdmsr_amd(LS_CFG_MSR);
	msr.lo |= 1 << 25;
	wrmsr_amd(LS_CFG_MSR, msr);

	/* Erratum 107 ... */
	msr = rdmsr_amd(BU_CFG_MSR);
	msr.hi |= 1 << (43 - 32);
	wrmsr_amd(BU_CFG_MSR, msr);

	if (is_cpu_pre_e0() && !is_cpu_pre_d0()) {
		/* Erratum 110 (D0 stepping) ... */
		msr = rdmsr_amd(CPU_ID_HYPER_EXT_FEATURES);
		msr.hi |= 1;
		wrmsr_amd(CPU_ID_HYPER_EXT_FEATURES, msr);
	}

	if (!is_cpu_pre_e0()) {
		/* Erratum 110 ... */
		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1;
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);

		/* Erratum 113 ... */
		msr = rdmsr_amd(BU_CFG_MSR);
		msr.hi |= (1 << 16);
		wrmsr_amd(BU_CFG_MSR, msr);
	}

	/* Erratum 122 */
	if (!is_cpu_pre_c0()) {
		msr = rdmsr(HWCR_MSR);
		msr.lo |= 1 << 6;
		wrmsr(HWCR_MSR, msr);
	}

	/* Erratum 123? dual core deadlock? */
	if (!is_cpu_pre_e0()) {
		msr = rdmsr(NB_CFG_MSR);
		msr.lo |= 1 << 22;
		wrmsr(NB_CFG_MSR, msr);
	}
}
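
/* Per-CPU initialization entry point, run on every core that matches
 * cpu_table below: enable caching and MTRRs, clear the machine check
 * status banks, apply errata, bring up the local APIC, and, on core 0
 * of each node only, run the ECC memory clear.
 */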
void model_fxx_init(device_t cpu)
{
	unsigned long i;
	msr_t msr;
	struct node_core_id id;

	/* Turn on caching if we haven't already */
	x86_enable_cache();
	amd_setup_mtrrs();
	x86_mtrr_check();

	disable_cache();
	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for(i = 0; i < 5; i++) {
		wrmsr(MCI_STATUS + (i*4), msr);
	}
	k8_errata();
	enable_cache();

	/* Enable the local cpu apics */
	setup_lapic();

	/* Find our node and core */
	id = get_node_core_id();

	/* Is this a bad location? In particular can another node prefetch
	 * data from this node before we have initialized it?
	 */
	if (id.coreid == 0) {
		init_ecc_memory(id.nodeid); /* only do it for core 0 */
	}

	/* Deal with sibling cpus */
	amd_sibling_init(cpu, id);
}

static struct device_operations cpu_dev_ops = {
	.init = model_fxx_init,
};

static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_AMD, 0xf50 }, /* B3 */
	{ X86_VENDOR_AMD, 0xf51 }, /* SH7-B3 */
	{ X86_VENDOR_AMD, 0xf58 }, /* SH7-C0 */
	{ X86_VENDOR_AMD, 0xf48 },

	{ X86_VENDOR_AMD, 0xf5A }, /* SH7-CG */
	{ X86_VENDOR_AMD, 0xf4A },
	{ X86_VENDOR_AMD, 0xf7A },
	{ X86_VENDOR_AMD, 0xfc0 }, /* DH7-CG */
	{ X86_VENDOR_AMD, 0xfe0 },
	{ X86_VENDOR_AMD, 0xff0 },
	{ X86_VENDOR_AMD, 0xf82 }, /* CH7-CG */
	{ X86_VENDOR_AMD, 0xfb2 },

	{ X86_VENDOR_AMD, 0x10f50 }, /* SH7-D0 */
	{ X86_VENDOR_AMD, 0x10f40 },
	{ X86_VENDOR_AMD, 0x10f70 },
	{ X86_VENDOR_AMD, 0x10fc0 }, /* DH7-D0 */
	{ X86_VENDOR_AMD, 0x10ff0 },
	{ X86_VENDOR_AMD, 0x10f80 }, /* CH7-D0 */
	{ X86_VENDOR_AMD, 0x10fb0 },

	{ X86_VENDOR_AMD, 0x20f50 }, /* SH8-E0 */
	{ X86_VENDOR_AMD, 0x20f40 },
	{ X86_VENDOR_AMD, 0x20f70 },
	{ X86_VENDOR_AMD, 0x20fc0 }, /* DH8-E0 */ /* DH-E3 */
	{ X86_VENDOR_AMD, 0x20ff0 },
	{ X86_VENDOR_AMD, 0x20f10 }, /* JH8-E1 */
	{ X86_VENDOR_AMD, 0x20f30 },
	{ X86_VENDOR_AMD, 0x20f51 }, /* SH-E4 */
	{ X86_VENDOR_AMD, 0x20f71 },
	{ X86_VENDOR_AMD, 0x20f42 }, /* SH-E5 */
	{ X86_VENDOR_AMD, 0x20ff2 }, /* DH-E6 */
	{ X86_VENDOR_AMD, 0x20fc2 },
	{ X86_VENDOR_AMD, 0x20f12 }, /* JH-E6 */
	{ X86_VENDOR_AMD, 0x20f32 },

	{ 0, 0 },
};

static struct cpu_driver model_fxx __cpu_driver = {
	.ops      = &cpu_dev_ops,
	.id_table = cpu_table,
};
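
/* The __cpu_driver attribute places this struct in a dedicated linker
 * section that coreboot scans during CPU enumeration: each processor's
 * CPUID result is matched against cpu_table, and on a match the
 * cpu_dev_ops.init hook (model_fxx_init) is invoked on that CPU.
 */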