1 /* Needed so the AMD K8 runs correctly. */
2 /* this should be done by Eric
3  * 2004.11 yhlu add d0 e0 support
4  * 2004.12 yhlu add dual core support
5  * 2005.02 yhlu add e0 memory hole support
 */
7 #include <console/console.h>
8 #include <cpu/x86/msr.h>
9 #include <cpu/amd/mtrr.h>
10 #include <device/device.h>
11 #include <device/device.h>
12 #include <device/pci.h>
14 #include <cpu/x86/msr.h>
15 #include <cpu/x86/pae.h>
16 #include <pc80/mc146818rtc.h>
17 #include <cpu/x86/lapic.h>
18 #include "../../../northbridge/amd/amdk8/amdk8.h"
19 #include "../../../northbridge/amd/amdk8/cpu_rev.c"
21 #include <cpu/x86/cache.h>
22 #include <cpu/x86/mtrr.h>
23 #include <cpu/x86/mem.h>
25 #if CONFIG_LOGICAL_CPUS==1
26 #include <cpu/amd/dualcore.h>
29 #include "model_fxx_msr.h"
31 #define MCI_STATUS 0x401
/*
 * Read an AMD-specific MSR. AMD "password-protected" MSRs (BU_CFG,
 * DC_CFG, IC_CFG, ...) require EDI to hold the magic value 0x9c5a203a
 * during the RDMSR, which the "D" constraint below supplies.
 * NOTE(review): this listing is missing lines here (opening brace, the
 * msr_t result declaration, the "rdmsr" mnemonic and the return) --
 * confirm against the complete file.
 */
33 static inline msr_t rdmsr_amd(unsigned index)
36 __asm__ __volatile__ (
38 : "=a" (result.lo), "=d" (result.hi)
39 : "c" (index), "D" (0x9c5a203a)
/*
 * Write an AMD-specific MSR using the same EDI "password" (0x9c5a203a)
 * handshake as rdmsr_amd() above.
 * NOTE(review): the braces and the "wrmsr" mnemonic are absent from
 * this listing -- confirm against the complete file.
 */
44 static inline void wrmsr_amd(unsigned index, msr_t msr)
46 __asm__ __volatile__ (
49 : "c" (index), "a" (msr.lo), "d" (msr.hi), "D" (0x9c5a203a)
56 #define ZERO_CHUNK_KB 0x800UL /* 2M */
57 #define TOLM_KB 0x400000UL
/* Snapshot of the MTRR configuration taken before ECC memory clearing
 * and restored afterwards.
 * NOTE(review): the enclosing "struct mtrr_state {" line and the
 * MTRR_COUNT definition are not visible in this listing. */
64 struct mtrr mtrrs[MTRR_COUNT]; /* variable-range MTRR base/mask pairs */
65 msr_t top_mem, top_mem2; /* saved TOP_MEM and TOP_MEM2 MSR values */
/* Save all variable-range MTRRs, TOP_MEM/TOP_MEM2 and the MTRR default
 * type into *state so restore_mtrr_state() can put them back after the
 * ECC clearing loop temporarily reprograms them. */
69 static void save_mtrr_state(struct mtrr_state *state)
72 for(i = 0; i < MTRR_COUNT; i++) {
73 state->mtrrs[i].base = rdmsr(MTRRphysBase_MSR(i));
74 state->mtrrs[i].mask = rdmsr(MTRRphysMask_MSR(i));
76 state->top_mem = rdmsr(TOP_MEM);
77 state->top_mem2 = rdmsr(TOP_MEM2);
78 state->def_type = rdmsr(MTRRdefType_MSR);
/* Restore the MTRR configuration previously captured by
 * save_mtrr_state(); the default-type MSR is written last, after all
 * variable ranges and TOP_MEM/TOP_MEM2 are back in place. */
81 static void restore_mtrr_state(struct mtrr_state *state)
86 for(i = 0; i < MTRR_COUNT; i++) {
87 wrmsr(MTRRphysBase_MSR(i), state->mtrrs[i].base);
88 wrmsr(MTRRphysMask_MSR(i), state->mtrrs[i].mask);
90 wrmsr(TOP_MEM, state->top_mem);
91 wrmsr(TOP_MEM2, state->top_mem2);
92 wrmsr(MTRRdefType_MSR, state->def_type);
/* Debug helper: dump a saved mtrr_state (variable MTRRs, TOP_MEM,
 * TOP_MEM2, default type) to the console. */
99 static void print_mtrr_state(struct mtrr_state *state)
102 for(i = 0; i < MTRR_COUNT; i++) {
103 printk_debug("var mtrr %d: %08x%08x mask: %08x%08x\n",
105 state->mtrrs[i].base.hi, state->mtrrs[i].base.lo,
106 state->mtrrs[i].mask.hi, state->mtrrs[i].mask.lo);
108 printk_debug("top_mem: %08x%08x\n",
109 state->top_mem.hi, state->top_mem.lo);
110 printk_debug("top_mem2: %08x%08x\n",
111 state->top_mem2.hi, state->top_mem2.lo);
112 printk_debug("def_type: %08x%08x\n",
113 state->def_type.hi, state->def_type.lo);
/* Program a minimal MTRR layout for the memory-clearing loop: default
 * everything to write-combining (fast streaming stores to uncached
 * DRAM) while keeping the low CONFIG_LB_MEM_TOPK of RAM write-back so
 * the running code and stack stay cached. */
117 static void set_init_ecc_mtrrs(void)
123 /* First clear all of the msrs to be safe */
124 for(i = 0; i < MTRR_COUNT; i++) {
126 zero.lo = zero.hi = 0;
127 wrmsr(MTRRphysBase_MSR(i), zero);
128 wrmsr(MTRRphysMask_MSR(i), zero);
131 /* Write-back cache the low CONFIG_LB_MEM_TOPK kilobytes of RAM */
133 msr.lo = 0x00000000 | MTRR_TYPE_WRBACK;
134 wrmsr(MTRRphysBase_MSR(0), msr);
136 msr.lo = ~((CONFIG_LB_MEM_TOPK << 10) - 1) | 0x800; /* 0x800 = mask-valid bit */
137 wrmsr(MTRRphysMask_MSR(0), msr);
139 /* Set the default type to write combining */
141 msr.lo = 0xc00 | MTRR_TYPE_WRCOMB; /* 0xc00: MTRR enable + fixed-range enable */
142 wrmsr(MTRRdefType_MSR, msr);
144 /* Set TOP_MEM to 4G */
/*
 * Initialize ECC for this node's memory: zero-fill the node's DRAM
 * range (writing every location establishes valid ECC check bits),
 * then program and optionally enable the hardware DRAM scrubber.
 * node_id selects the northbridge (PCI device 0x18 + node_id).
 * NOTE(review): this listing is missing many lines (braces, local
 * declarations such as dcl/basek/addr/size, #endif lines) -- the
 * comments below describe only what the visible code shows.
 */
152 static void init_ecc_memory(unsigned node_id)
154 unsigned long startk, begink, endk;
155 #if K8_E0_MEM_HOLE_SIZEK != 0
156 unsigned long hole_startk = 0, hole_endk = 0;
159 struct mtrr_state mtrr_state;
160 device_t f1_dev, f2_dev, f3_dev;
161 int enable_scrubbing;
/* Locate northbridge functions 1 (address map), 2 (DRAM controller)
 * and 3 (miscellaneous control) for this node. */
164 f1_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 1));
166 die("Cannot find cpu function 1\n");
168 f2_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 2));
170 die("Cannot find cpu function 2\n");
172 f3_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 3));
174 die("Cannot find cpu function 3\n");
177 /* See if scrubbing should be enabled (CMOS option "hw_scrubber",
 * default on) */
178 enable_scrubbing = 1;
179 get_option(&enable_scrubbing, "hw_scrubber");
181 /* Enable cache scrubbing at the lowest possible rate */
182 if (enable_scrubbing) {
183 pci_write_config32(f3_dev, SCRUB_CONTROL,
184 (SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_NONE << 0));
186 pci_write_config32(f3_dev, SCRUB_CONTROL,
187 (SCRUB_NONE << 16) | (SCRUB_NONE << 8) | (SCRUB_NONE << 0));
188 printk_debug("Scrubbing Disabled\n");
192 /* If ecc support is not enabled don't touch memory */
193 dcl = pci_read_config32(f2_dev, DRAM_CONFIG_LOW);
194 if (!(dcl & DCL_DimmEccEn)) {
/* Node DRAM base/limit registers (F1x40/F1x44 + 8*node): bits 31:16
 * hold address[39:24]; ">> 2" converts that encoding to KB. The +0x4000
 * (16MB in KB) rounds the limit register up to an exclusive end. */
198 startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
199 endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;
201 #if K8_E0_MEM_HOLE_SIZEK != 0
/* E0 and later parts may have a DRAM hole (F1xF0); compute the hole's
 * KB range so the clearing loop can skip it. */
202 if (!is_cpu_pre_e0()) {
204 val = pci_read_config32(f1_dev, 0xf0);
206 hole_startk = ((val & (0xff<<24)) >> 10);
207 hole_endk = ((val & (0xff<<8))<<(16-10)) - startk;
208 hole_endk += hole_startk;
214 /* Don't start too early -- the low CONFIG_LB_MEM_TOPK is in use by
 * the running firmware. */
216 if (begink < CONFIG_LB_MEM_TOPK) {
217 begink = CONFIG_LB_MEM_TOPK;
219 printk_debug("Clearing memory %uK - %uK: ", startk, endk);
221 /* Save the normal state */
222 save_mtrr_state(&mtrr_state);
224 /* Switch to the init ecc state */
225 set_init_ecc_mtrrs();
228 /* Walk through 2M chunks and zero them */
229 for(basek = begink; basek < endk; basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1))) {
230 unsigned long limitk;
234 #if K8_E0_MEM_HOLE_SIZEK != 0
235 if ((basek >= hole_startk) && (basek < hole_endk)) continue;
237 /* Report every 64M */
238 if ((basek % (64*1024)) == 0) {
239 /* Restore the normal state */
241 restore_mtrr_state(&mtrr_state);
244 /* Print a status message ('+' above the top-of-low-memory line) */
245 printk_debug("%c", (basek >= TOLM_KB)?'+':'-');
247 /* Return to the initialization state */
248 set_init_ecc_mtrrs();
/* Round the chunk end to the next 2M boundary, map it through the
 * 32-bit window and zero it. */
252 limitk = (basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1);
256 size = (limitk - basek) << 10;
257 addr = map_2M_page(basek >> 11);
258 addr = (void *)(((uint32_t)addr) | ((basek & 0x7ff) << 10));
259 if (addr == MAPPING_ERROR) {
263 /* clear memory 2M (limitk - basek) */
264 clear_memory(addr, size);
266 /* Restore the normal state */
268 restore_mtrr_state(&mtrr_state);
271 /* Set the scrub base address registers */
272 pci_write_config32(f3_dev, SCRUB_ADDR_LOW, startk << 10);
273 pci_write_config32(f3_dev, SCRUB_ADDR_HIGH, startk >> 22);
275 /* Enable the scrubber? */
276 if (enable_scrubbing) {
277 /* Enable scrubbing at the lowest possible rate */
278 pci_write_config32(f3_dev, SCRUB_CONTROL,
279 (SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_84ms << 0));
282 printk_debug(" done\n");
/*
 * Apply revision-dependent K8 errata workarounds via MSR writes, gated
 * by is_cpu_pre_* revision checks.
 * NOTE(review): many lines (bit-set statements, braces, erratum-number
 * comments) are missing from this listing, so most workarounds here
 * cannot be matched to a specific erratum from this text alone --
 * verify against the complete file and AMD revision guide.
 */
285 static inline void k8_errata(void)
/* Pre-C0 parts: adjust HWCR plus the AMD-only BU_CFG and DC_CFG MSRs
 * (the intervening bit manipulation lines are not visible here). */
288 if (is_cpu_pre_c0()) {
290 msr = rdmsr(HWCR_MSR);
292 wrmsr(HWCR_MSR, msr);
295 msr = rdmsr_amd(BU_CFG_MSR);
296 msr.hi |= (1 << (45 - 32)); /* set BU_CFG bit 45 */
297 wrmsr_amd(BU_CFG_MSR, msr);
300 msr = rdmsr_amd(DC_CFG_MSR);
302 wrmsr_amd(DC_CFG_MSR, msr);
305 /* I can't touch this msr on early buggy cpus */
306 if (!is_cpu_pre_b3()) {
309 msr = rdmsr(NB_CFG_MSR);
312 if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
313 /* D0 later don't need it */
314 /* Erratum 86 Disable data masking on C0 and
315 * later processor revs.
316 * FIXME this is only needed if ECC is enabled.
 */
318 msr.hi |= 1 << (36 - 32); /* NB_CFG bit 36 */
320 wrmsr(NB_CFG_MSR, msr);
/* C0..pre-D0 only: additional DC_CFG workaround. */
323 if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
324 /* D0 later don't need it */
326 msr = rdmsr_amd(DC_CFG_MSR);
328 wrmsr_amd(DC_CFG_MSR, msr);
/* Pre-D0 only: IC_CFG workaround. */
332 if(is_cpu_pre_d0()) {
333 /* D0 later don't need it */
335 msr = rdmsr_amd(IC_CFG_MSR);
337 wrmsr_amd(IC_CFG_MSR, msr);
340 /* Erratum 91 prefetch miss is handled in the kernel */
345 msr = rdmsr_amd(CPU_ID_HYPER_EXT_FEATURES);
347 wrmsr_amd(CPU_ID_HYPER_EXT_FEATURES, msr);
/* E0 and later: extended-features MSR adjustment. */
351 if(!is_cpu_pre_e0()) {
353 msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
355 wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
/*
 * Per-CPU init entry point registered in cpu_dev_ops: clears the
 * machine-check status banks, advertises the sibling/core count when
 * dual-core support is built in, runs ECC memory init for this node,
 * and enables the local APIC.
 * NOTE(review): this listing is missing lines (cache enable, the MCA
 * clearing loop header, k8_errata()/lapic calls, #endif/#else lines) --
 * comments below cover only the visible code.
 */
359 void model_fxx_init(device_t dev)
363 #if CONFIG_LOGICAL_CPUS==1
364 struct node_core_id id;
371 /* Turn on caching if we haven't already */
378 /* zero the machine check error status registers */
382 wrmsr(MCI_STATUS + (i*4),msr); /* MC bank i status (0x401 + 4*i) */
389 #if CONFIG_LOGICAL_CPUS==1
390 //AMD_DUAL_CORE_SUPPORT
/* CPUID 0x80000008 ECX[7:0] = number of additional cores ("siblings"). */
391 siblings = cpuid_ecx(0x80000008) & 0xff;
393 // id = get_node_core_id((!is_cpu_pre_e0())? read_nb_cfg_54():0);
394 id = get_node_core_id(read_nb_cfg_54()); // pre e0 nb_cfg_54 can not be set
397 msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
399 wrmsr_amd(CPU_ID_FEATURES_MSR, msr);
/* Report the logical CPU count (siblings + 1) in bits 23:16. */
401 msr = rdmsr_amd(LOGICAL_CPUS_NUM_MSR);
402 msr.lo = (siblings+1)<<16;
403 wrmsr_amd(LOGICAL_CPUS_NUM_MSR, msr);
405 msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
406 msr.hi |= 1<<(33-32); /* set extended-features bit 33 */
407 wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
410 /* Is this a bad location? In particular can another node prefetch
411 * data from this node before we have initialized it?
 */
413 if(id.coreid == 0) init_ecc_memory(id.nodeid); // only do it for core0
415 /* For now there is a 1-1 mapping between node_id and cpu_id */
416 nodeid = lapicid() & 0x7;
417 init_ecc_memory(nodeid);
420 /* Enable the local cpu apics */
423 #if CONFIG_LOGICAL_CPUS==1
424 //AMD_DUAL_CORE_SUPPORT
425 /* Start up my cpu siblings */
426 // if(id.coreid==0) amd_sibling_init(dev); // Don't need core1 is already be put in the CPU BUS in bus_cpu_scan
/* Device operations: model_fxx_init() is invoked for each matching CPU.
 * NOTE(review): the closing "};" is on a line missing from this listing. */
430 static struct device_operations cpu_dev_ops = {
431 .init = model_fxx_init,
/* CPUID (vendor, family/model/stepping) values this driver binds to:
 * AMD K8 revisions B3 through E6 across SH/DH/CH/JH silicon.
 * NOTE(review): some entries and the terminating sentinel/"};" fall on
 * lines missing from this listing. */
433 static struct cpu_device_id cpu_table[] = {
434 { X86_VENDOR_AMD, 0xf50 }, /* B3 */
435 { X86_VENDOR_AMD, 0xf51 }, /* SH7-B3 */
436 { X86_VENDOR_AMD, 0xf58 }, /* SH7-C0 */
437 { X86_VENDOR_AMD, 0xf48 },
439 { X86_VENDOR_AMD, 0xf5A }, /* SH7-CG */
440 { X86_VENDOR_AMD, 0xf4A },
441 { X86_VENDOR_AMD, 0xf7A },
442 { X86_VENDOR_AMD, 0xfc0 }, /* DH7-CG */
443 { X86_VENDOR_AMD, 0xfe0 },
444 { X86_VENDOR_AMD, 0xff0 },
445 { X86_VENDOR_AMD, 0xf82 }, /* CH7-CG */
446 { X86_VENDOR_AMD, 0xfb2 },
448 { X86_VENDOR_AMD, 0x10f50 }, /* SH7-D0 */
449 { X86_VENDOR_AMD, 0x10f40 },
450 { X86_VENDOR_AMD, 0x10f70 },
451 { X86_VENDOR_AMD, 0x10fc0 }, /* DH7-D0 */
452 { X86_VENDOR_AMD, 0x10ff0 },
453 { X86_VENDOR_AMD, 0x10f80 }, /* CH7-D0 */
454 { X86_VENDOR_AMD, 0x10fb0 },
456 { X86_VENDOR_AMD, 0x20f50 }, /* SH8-E0*/
457 { X86_VENDOR_AMD, 0x20f40 },
458 { X86_VENDOR_AMD, 0x20f70 },
459 { X86_VENDOR_AMD, 0x20fc0 }, /* DH8-E0 */ /* DH-E3 */
460 { X86_VENDOR_AMD, 0x20ff0 },
461 { X86_VENDOR_AMD, 0x20f10 }, /* JH8-E1 */
462 { X86_VENDOR_AMD, 0x20f30 },
463 { X86_VENDOR_AMD, 0x20f51 }, /* SH-E4 */
464 { X86_VENDOR_AMD, 0x20f71 },
465 { X86_VENDOR_AMD, 0x20f42 }, /* SH-E5 */
466 { X86_VENDOR_AMD, 0x20ff2 }, /* DH-E6 */
467 { X86_VENDOR_AMD, 0x20fc2 },
468 { X86_VENDOR_AMD, 0x20f12 }, /* JH-E6 */
469 { X86_VENDOR_AMD, 0x20f32 },
/* Register this file as the CPU driver for the IDs in cpu_table.
 * NOTE(review): the ".ops" assignment and closing "};" are on lines
 * missing from this listing. */
474 static struct cpu_driver model_fxx __cpu_driver = {
476 .id_table = cpu_table,