/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
20 #include <console/console.h>
21 #include <cpu/x86/msr.h>
22 #include <cpu/amd/mtrr.h>
23 #include <device/device.h>
24 #include <device/pci.h>
26 #include <cpu/x86/msr.h>
27 #include <cpu/x86/pae.h>
28 #include <pc80/mc146818rtc.h>
29 #include <cpu/x86/lapic.h>
31 #include "../../../northbridge/amd/amdfam10/amdfam10.h"
33 #include <cpu/amd/model_10xxx_rev.h>
35 #include <cpu/x86/cache.h>
36 #include <cpu/x86/mtrr.h>
37 #include <cpu/x86/mem.h>
39 #include <cpu/amd/quadcore.h>
41 #include <cpu/amd/model_10xxx_msr.h>
43 extern void prep_pstates_all(void);
44 extern void init_pstates(device_t dev, u32 nodeid, u32 coreid);
45 extern device_t get_node_pci(u32 nodeid, u32 fn);
/*
 * Hook called once all CPUs are up, before device init proceeds.
 * Prepares P-state tables and, when memory is trained sequentially,
 * waits for every node's core 0 to finish DRAM training.
 */
void cpus_ready_for_init(void)
{
	prep_pstates_all();
#if MEM_TRAIN_SEQ == 1
	/* The romstage's global variables live just below CONFIG_LB_MEM_TOPK. */
	struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE);
	// wait for ap memory to trained
	wait_all_core0_mem_trained(sysinfox);
#endif
}
60 #define MCI_STATUS 0x401
63 static inline msr_t rdmsr_amd(u32 index)
66 __asm__ __volatile__ (
68 : "=a" (result.lo), "=d" (result.hi)
69 : "c" (index), "D" (0x9c5a203a)
75 static inline void wrmsr_amd(u32 index, msr_t msr)
77 __asm__ __volatile__ (
80 : "c" (index), "a" (msr.lo), "d" (msr.hi), "D" (0x9c5a203a)
86 #define ZERO_CHUNK_KB 0x800UL /* 2M */
87 #define TOLM_KB 0x400000UL
96 struct mtrr mtrrs[MTRR_COUNT];
97 msr_t top_mem, top_mem2;
102 static void save_mtrr_state(struct mtrr_state *state)
105 for(i = 0; i < MTRR_COUNT; i++) {
106 state->mtrrs[i].base = rdmsr(MTRRphysBase_MSR(i));
107 state->mtrrs[i].mask = rdmsr(MTRRphysMask_MSR(i));
109 state->top_mem = rdmsr(TOP_MEM);
110 state->top_mem2 = rdmsr(TOP_MEM2);
111 state->def_type = rdmsr(MTRRdefType_MSR);
115 static void restore_mtrr_state(struct mtrr_state *state)
120 for(i = 0; i < MTRR_COUNT; i++) {
121 wrmsr(MTRRphysBase_MSR(i), state->mtrrs[i].base);
122 wrmsr(MTRRphysMask_MSR(i), state->mtrrs[i].mask);
124 wrmsr(TOP_MEM, state->top_mem);
125 wrmsr(TOP_MEM2, state->top_mem2);
126 wrmsr(MTRRdefType_MSR, state->def_type);
133 static void print_mtrr_state(struct mtrr_state *state)
136 for(i = 0; i < MTRR_COUNT; i++) {
137 printk_debug("var mtrr %d: %08x%08x mask: %08x%08x\n",
139 state->mtrrs[i].base.hi, state->mtrrs[i].base.lo,
140 state->mtrrs[i].mask.hi, state->mtrrs[i].mask.lo);
142 printk_debug("top_mem: %08x%08x\n",
143 state->top_mem.hi, state->top_mem.lo);
144 printk_debug("top_mem2: %08x%08x\n",
145 state->top_mem2.hi, state->top_mem2.lo);
146 printk_debug("def_type: %08x%08x\n",
147 state->def_type.hi, state->def_type.lo);
152 static void set_init_ecc_mtrrs(void)
158 /* First clear all of the msrs to be safe */
159 for(i = 0; i < MTRR_COUNT; i++) {
161 zero.lo = zero.hi = 0;
162 wrmsr(MTRRphysBase_MSR(i), zero);
163 wrmsr(MTRRphysMask_MSR(i), zero);
166 /* Write back cache the first 1MB */
168 msr.lo = 0x00000000 | MTRR_TYPE_WRBACK;
169 wrmsr(MTRRphysBase_MSR(0), msr);
171 msr.lo = ~((CONFIG_LB_MEM_TOPK << 10) - 1) | 0x800;
172 wrmsr(MTRRphysMask_MSR(0), msr);
174 /* Set the default type to write combining */
176 msr.lo = 0xc00 | MTRR_TYPE_WRCOMB;
177 wrmsr(MTRRdefType_MSR, msr);
179 /* Set TOP_MEM to 4G */
188 static inline void clear_2M_ram(unsigned long basek, struct mtrr_state *mtrr_state)
190 unsigned long limitk;
194 /* Report every 64M */
195 if ((basek % (64*1024)) == 0) {
197 /* Restore the normal state */
199 restore_mtrr_state(mtrr_state);
202 /* Print a status message */
203 printk_debug("%c", (basek >= TOLM_KB)?'+':'-');
205 /* Return to the initialization state */
206 set_init_ecc_mtrrs();
211 limitk = (basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1);
213 size = (limitk - basek) << 10;
214 addr = map_2M_page(basek >> 11);
215 if (addr == MAPPING_ERROR) {
216 printk_err("Cannot map page: %x\n", basek >> 11);
220 /* clear memory 2M (limitk - basek) */
221 addr = (void *)(((u32)addr) | ((basek & 0x7ff) << 10));
222 clear_memory(addr, size);
226 static void init_ecc_memory(u32 node_id)
228 unsigned long startk, begink, endk;
229 unsigned long hole_startk = 0;
231 struct mtrr_state mtrr_state;
233 device_t f1_dev, f2_dev, f3_dev;
234 int enable_scrubbing;
237 f1_dev = get_node_pci(node_id, 1);
240 die("Cannot find cpu function 1\n");
242 f2_dev = get_node_pci(node_id, 2);
244 die("Cannot find cpu function 2\n");
246 f3_dev = get_node_pci(node_id, 3);
248 die("Cannot find cpu function 3\n");
251 /* See if we scrubbing should be enabled */
252 enable_scrubbing = 1;
253 get_option(&enable_scrubbing, "hw_scrubber");
255 /* Enable cache scrubbing at the lowest possible rate */
256 if (enable_scrubbing) {
257 pci_write_config32(f3_dev, DRAM_SCRUB_RATE_CTRL,
258 (SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_NONE << 0));
260 pci_write_config32(f3_dev, DRAM_SCRUB_RATE_CTRL,
261 (SCRUB_NONE << 16) | (SCRUB_NONE << 8) | (SCRUB_NONE << 0));
262 printk_debug("Scrubbing Disabled\n");
266 /* If ecc support is not enabled don't touch memory */
267 dcl = pci_read_config32(f2_dev, DRAM_CONFIG_LOW);
268 if (!(dcl & DCL_DimmEccEn)) {
269 printk_debug("ECC Disabled\n");
273 startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
274 endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;
276 #if HW_MEM_HOLE_SIZEK != 0
278 val = pci_read_config32(f1_dev, 0xf0);
280 hole_startk = ((val & (0xff<<24)) >> 10);
285 /* Don't start too early */
287 if (begink < CONFIG_LB_MEM_TOPK) {
288 begink = CONFIG_LB_MEM_TOPK;
291 printk_debug("Clearing memory %uK - %uK: ", begink, endk);
293 /* Save the normal state */
294 save_mtrr_state(&mtrr_state);
296 /* Switch to the init ecc state */
297 set_init_ecc_mtrrs();
300 /* Walk through 2M chunks and zero them */
301 #if HW_MEM_HOLE_SIZEK != 0
302 /* here hole_startk can not be equal to begink, never. Also hole_startk is in 2M boundary, 64M? */
303 if ( (hole_startk != 0) && ((begink < hole_startk) && (endk>(4*1024*1024)))) {
304 for(basek = begink; basek < hole_startk;
305 basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1)))
307 clear_2M_ram(basek, &mtrr_state);
309 for(basek = 4*1024*1024; basek < endk;
310 basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1)))
312 clear_2M_ram(basek, &mtrr_state);
316 for(basek = begink; basek < endk;
317 basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1))) {
318 clear_2M_ram(basek, &mtrr_state);
322 /* Restore the normal state */
324 restore_mtrr_state(&mtrr_state);
327 /* Set the scrub base address registers */
328 pci_write_config32(f3_dev, DRAM_SCRUB_ADDR_LOW, startk << 10);
329 pci_write_config32(f3_dev, DRAM_SCRUB_ADDR_HIGH, startk >> 22);
331 /* Enable the scrubber? */
332 if (enable_scrubbing) {
333 /* Enable scrubbing at the lowest possible rate */
334 pci_write_config32(f3_dev, DRAM_SCRUB_RATE_CTRL,
335 (SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_84ms << 0));
338 printk_debug(" done\n");
/*
 * Family 10h errata workarounds. Currently all workarounds are
 * commented out (erratum 298 is fixed in B3/C1 silicon), so this is
 * a placeholder kept for future errata.
 */
static inline void fam10_errata(void)
{
	/* FIXME: Is doing errata here too late? */

	/* 298 : FIXME: Fixed in B3/C1 */
	/* msr = rdmsr(0xC0010015);
	msr.lo |= 1 << 3;
	wrmsr(0xC0010015, msr);

	msr = rdmsr(0xC0010023);
	msr.lo |= 1 << 29;
	wrmsr(0xC0010023, msr); */
}
358 static void smash1Gpages(void)
362 /* 1G pages are smashed and installed in the TLB as 2M pages.
363 BIOS must set this bit for revision B. */
364 /* FIXME: What about RevC? */
366 msr = rdmsr(0xC001102A);
368 wrmsr(0xC001102A, msr);
372 extern void update_microcode(u32 cpu_deviceid);
375 void model_10xxx_init(device_t dev)
379 struct node_core_id id;
380 #if CONFIG_LOGICAL_CPUS == 1
384 /* Turn on caching if we haven't already */
390 /* Update the microcode */
391 update_microcode(dev->device);
395 /* zero the machine check error status registers */
398 for(i=0; i < 5; i++) {
399 wrmsr(MCI_STATUS + (i * 4),msr);
406 /* Enable the local cpu apics */
409 #if CONFIG_LOGICAL_CPUS == 1
410 siblings = cpuid_ecx(0x80000008) & 0xff;
413 msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
415 wrmsr_amd(CPU_ID_FEATURES_MSR, msr);
417 msr = rdmsr_amd(LOGICAL_CPUS_NUM_MSR);
418 msr.lo = (siblings+1)<<16;
419 wrmsr_amd(LOGICAL_CPUS_NUM_MSR, msr);
421 msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
422 msr.hi |= 1<<(33-32);
423 wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
425 printk_debug("siblings = %02d, ", siblings);
428 id = get_node_core_id(read_nb_cfg_54()); // pre e0 nb_cfg_54 can not be set
430 printk_debug("nodeid = %02d, coreid = %02d\n", id.nodeid, id.coreid);
432 init_pstates(dev, id.nodeid, id.coreid); // is it a good place? some cores are clearing their ram
434 /* Is this a bad location? In particular can another node prefecth
435 * data from this node before we have initialized it?
437 if (id.coreid == 0) init_ecc_memory(id.nodeid); // only do it for core 0
439 #if CONFIG_LOGICAL_CPUS==1
440 /* Start up my cpu siblings */
441 // if(id.coreid==0) amd_sibling_init(dev); // Don't need core1 is already be put in the CPU BUS in bus_cpu_scan
447 static struct device_operations cpu_dev_ops = {
448 .init = model_10xxx_init,
450 static struct cpu_device_id cpu_table[] = {
452 { X86_VENDOR_AMD, 0x100f00 }, /* SH-F0 L1 */
453 { X86_VENDOR_AMD, 0x100f10 }, /* M2 */
454 { X86_VENDOR_AMD, 0x100f20 }, /* S1g1 */
455 { X86_VENDOR_AMD, 0x100f21 },
456 { X86_VENDOR_AMD, 0x100f2A },
457 { X86_VENDOR_AMD, 0x100f22 },
460 static struct cpu_driver model_10xxx __cpu_driver = {
462 .id_table = cpu_table,