2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <cpu/x86/lapic.h>
23 #include <cpu/x86/mtrr.h>
24 #include <northbridge/amd/amdfam10/amdfam10.h>
25 #include <northbridge/amd/amdht/AsPsDefs.h>
26 #include <northbridge/amd/amdht/porting.h>
28 #include <cpu/x86/mtrr/earlymtrr.c>
29 #include <northbridge/amd/amdfam10/raminit_amdmct.c>
/* Tuning knob and forward declarations for the FID/VID (frequency/voltage)
 * setup code.  NOTE(review): listing is fragmented here; lines are missing
 * between the numbered fragments. */
31 //it takes the CONFIG_ENABLE_APIC_EXT_ID and CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID
36 #ifndef SET_FIDVID_CORE0_ONLY
37 /* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
38 Need to do every AP to set common FID/VID */
39 #define SET_FIDVID_CORE0_ONLY 0
/* cpuSetAMDMSR() is non-static — presumably referenced from other romstage
 * files; verify against the full source tree. */
42 static void prep_fid_change(void);
43 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
44 void cpuSetAMDMSR(void);
46 #if CONFIG_PCI_IO_CFG_EXT == 1
/* Enable extended PCI config access through CF8/CFC by setting
 * NB_CFG MSR bit 46 (EnableCf8ExtCfg).  Only meaningful when
 * CONFIG_PCI_IO_CFG_EXT is selected; the #else arm below is a no-op stub. */
47 static void set_EnableCf8ExtCfg(void)
49 // set the NB_CFG[46]=1;
51 msr = rdmsr(NB_CFG_MSR);
52 // EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
53 msr.hi |= (1 << (46 - 32));
54 wrmsr(NB_CFG_MSR, msr);
/* Stub used when extended config access is not configured. */
57 static void set_EnableCf8ExtCfg(void) { }
61 #define PCI_MMIO_BASE 0xfe000000
62 /* because we will use gs to store hi, so need to make sure lo can start
63 from 0, So PCI_MMIO_BASE & 0x00ffffff should be equal to 0*/
/* Program MSR 0xC0010058 (MMIO Configuration Base Address) to enable MMCONF
 * PCI config access: bus-count field in lo bits [5:2], enable in bit 0, and
 * the base address high bits in msr.hi.  The base is also written to
 * 0xC0000101 (GS_Base) — per the comment above, GS is used to stash the
 * high half for later config accesses. */
65 static void set_pci_mmio_conf_reg(void)
67 #if CONFIG_MMCONF_SUPPORT
69 msr = rdmsr(0xc0010058);
70 msr.lo &= ~(0xfff00000 | (0xf << 2));
71 // 256 bus per segment, MMIO reg will be 4G , enable MMIO Config space
72 msr.lo |= ((8 + CONFIG_PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
73 msr.hi &= ~(0x0000ffff);
74 msr.hi |= (PCI_MMIO_BASE >> (32 - 8));
75 wrmsr(0xc0010058, msr); // MMIO Config Base Address Reg
77 //mtrr for that range?
78 // set_var_mtrr_x(7, PCI_MMIO_BASE<<8, PCI_MMIO_BASE>>(32-8), 0x00000000, 0x01, MTRR_TYPE_UNCACHEABLE);
82 msr.hi = (PCI_MMIO_BASE >> (32 - 8));
84 wrmsr(0xc0000101, msr); //GS_Base Reg
/* Callback type applied to each AP (application processor) by for_each_ap. */
89 typedef void (*process_ap_t) (u32 apicid, void *gp);
91 //core_range = 0 : all cores
92 //core range = 1 : core 0 only
93 //core range = 2 : cores other than core0
/* Iterate over the APs selected by core_range and call process_ap(apicid, gp)
 * on each, skipping the BSP.  APIC IDs are reconstructed from node index i
 * and core index j; the nb_cfg_54 bit selects between the two id-encoding
 * schemes (node*cores+core vs. core*64+node-style — see the expression at
 * fragment 149-150).  NOTE(review): several lines (loop bounds jstart/jend,
 * else-branches, closing braces) are missing from this listing. */
95 static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
98 // here assume the OS don't change our apicid
103 u32 disable_siblings;
107 u32 ApicIdCoreIdSize;
109 /* get_nodes define in ht_wrapper.c */
/* Sibling (multi-core) enumeration is disabled either statically via
 * CONFIG_LOGICAL_CPUS or at runtime via the CMOS multi_core option. */
112 if (!CONFIG_LOGICAL_CPUS ||
113 read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 means multi core
114 disable_siblings = 1;
116 disable_siblings = 0;
119 /* Assume that all node are same stepping, otherwise we can use use
120 nb_cfg_54 from bsp for all nodes */
121 nb_cfg_54 = read_nb_cfg_54();
/* CPUID 8000_0008h ECX[15:12] = ApicIdCoreIdSize; 0 means use the
 * quad-core default of 3 siblings. */
123 ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
124 if (ApicIdCoreIdSize) {
125 siblings = ((1 << ApicIdCoreIdSize) - 1);
127 siblings = 3; //quad core
130 for (i = 0; i < nodes; i++) {
131 cores_found = get_core_num_in_bsp(i);
135 if (core_range == 2) {
141 if (disable_siblings || (core_range == 1)) {
147 for (j = jstart; j <= jend; j++) {
149 i * (nb_cfg_54 ? (siblings + 1) : 1) +
150 j * (nb_cfg_54 ? 1 : 64);
/* Apply the configured APIC ID offset, except for the BSP when
 * CONFIG_LIFT_BSP_APIC_ID == 0. */
152 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
153 #if CONFIG_LIFT_BSP_APIC_ID == 0
154 if ((i != 0) || (j != 0)) /* except bsp */
156 ap_apicid += CONFIG_APIC_ID_OFFSET;
/* Never invoke the callback on the BSP itself. */
159 if (ap_apicid == bsp_apicid)
162 process_ap(ap_apicid, gp);
/* Perform a LAPIC remote-read of register 'reg' on the CPU identified by
 * apicid, storing the result in *pvalue.  Sends a REMRD IPI, then polls the
 * ICR busy bit and the remote-read status until valid or a bounded timeout.
 * Return value and error path are not visible in this fragmented listing —
 * presumably returns 0 on success (callers compare against 0). */
168 static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
173 lapic_wait_icr_idle();
174 lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
175 lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
177 /* Extra busy check compared to lapic.h */
180 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
181 } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
/* Wait for the remote read to leave the in-progress state. */
185 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
186 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
/* Only a VALID status yields data from the Remote Read Register. */
190 if (status == LAPIC_ICR_RR_VALID) {
191 *pvalue = lapic_read(LAPIC_RRR);
197 /* Use the LAPIC timer count register to hold each cores init status */
198 #define LAPIC_MSG_REG 0x380
201 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
/* Debug helper: print a core's APIC ID, node ID and core ID with a caller
 * supplied prefix string. */
204 static inline __attribute__ ((always_inline))
205 void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
209 "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
210 apicid, id.nodeid, id.coreid);
/* Poll a remote core's status word (held in its LAPIC_MSG_REG) via LAPIC
 * remote read until its low 6 bits equal 'state'.  The loop bound, timeout
 * handling and return value are not visible in this fragmented listing —
 * callers treat a nonzero return as a timeout. */
213 static u32 wait_cpu_state(u32 apicid, u32 state)
219 if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
221 if ((readback & 0x3f) == state) {
223 break; //target cpu is in stage started
/* for_each_ap() callback: wait for one AP to report the "started" state
 * (0x13, the value written by init_cpus) and log the outcome. */
235 static void wait_ap_started(u32 ap_apicid, void *gp)
238 timeout = wait_cpu_state(ap_apicid, 0x13); // started
239 printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
241 printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
243 printk(BIOS_DEBUG, "started\n");
/* BSP-side barrier: wait until every non-core0 AP has reached the
 * "started" state (core_range = 2 selects cores other than core0). */
247 void wait_all_other_cores_started(u32 bsp_apicid)
249 // all aps other than core0
250 printk(BIOS_DEBUG, "started ap apicid: ");
251 for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
252 printk(BIOS_DEBUG, "\n");
/* BSP publishes state 0x14 ("APs may stop") in its own LAPIC_MSG_REG,
 * tagged with its APIC ID in the top byte. */
255 static void allow_all_aps_stop(u32 bsp_apicid)
257 /* Called by the BSP to indicate AP can stop */
259 /* FIXME Do APs use this?
260 Looks like wait_till_sysinfo_in_ram is used instead. */
262 // allow aps to stop use 6 bits for state
263 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
/* Turn on extended APIC ID handling in the node's HT Transaction Control
 * register (F0x68): extended spurious vectors, extended IDs, and extended
 * broadcast. */
266 static void enable_apic_ext_id(u32 node)
270 val = pci_read_config32(NODE_HT(node), 0x68);
271 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
272 pci_write_config32(NODE_HT(node), 0x68, val);
/* Tear down Cache-As-RAM on an AP and halt it: drop the CAR-only L2-to-L3
 * connection disable bit in BU_CFG2, disable cache-as-ram, then stop the
 * core.  The wrmsr and the final stop call are not visible in this
 * fragmented listing. */
275 static void STOP_CAR_AND_CPU(void)
279 /* Disable L2 IC to L3 connection (Only for CAR) */
280 msr = rdmsr(BU_CFG2);
281 msr.lo &= ~(1 << ClLinesToNbDis);
284 disable_cache_as_ram(); // inline
285 /* stop all cores except node0/core0 the bsp .... */
289 #if RAMINIT_SYSINFO == 1
/* Early per-core entry point (runs on BSP and every AP while still in
 * Cache-As-RAM):
 *  - enable MMCONF PCI config access,
 *  - on core0 of each node, set per-node knobs (CF8 ext cfg, ext APIC IDs),
 *  - optionally lift APIC IDs by CONFIG_APIC_ID_OFFSET,
 *  - report "started" (0x13) via LAPIC_MSG_REG,
 *  - on APs: update microcode, run FID/VID stage1 or stage2 depending on
 *    warm-reset detection, then set an MTRR and (presumably) halt.
 * NOTE(review): this listing is heavily fragmented — declarations,
 * soft_reset call, braces and the return path are missing; comments below
 * describe only the visible lines. */
290 static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
292 static u32 init_cpus(u32 cpu_init_detectedx)
297 struct node_core_id id;
300 * already set early mtrr in cache_as_ram.inc
303 /* enable access pci conf via mmio */
304 set_pci_mmio_conf_reg();
306 /* that is from initial apicid, we need nodeid and coreid
308 id = get_node_core_id_x();
310 /* NB_CFG MSR is shared between cores, so we need make sure
311 core0 is done at first --- use wait_all_core0_started */
312 if (id.coreid == 0) {
313 set_apicid_cpuid_lo(); /* only set it on core0 */
314 set_EnableCf8ExtCfg(); /* only set it on core0 */
315 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
316 enable_apic_ext_id(id.nodeid);
/* Optionally lift this core's APIC ID by CONFIG_APIC_ID_OFFSET; the BSP is
 * only lifted when CONFIG_LIFT_BSP_APIC_ID == 1. */
322 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
323 u32 initial_apicid = get_initial_apicid();
325 #if CONFIG_LIFT_BSP_APIC_ID == 0
326 if (initial_apicid != 0) // other than bsp
329 /* use initial apic id to lift it */
330 u32 dword = lapic_read(LAPIC_ID);
331 dword &= ~(0xff << 24);
333 (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
335 lapic_write(LAPIC_ID, dword);
337 #if CONFIG_LIFT_BSP_APIC_ID == 1
338 bsp_apicid += CONFIG_APIC_ID_OFFSET;
343 /* get the apicid, it may be lifted already */
346 // show our apicid, nodeid, and coreid
347 if (id.coreid == 0) {
348 if (id.nodeid != 0) //all core0 except bsp
349 print_apicid_nodeid_coreid(apicid, id, " core0: ");
350 } else { //all other cores
351 print_apicid_nodeid_coreid(apicid, id, " corex: ");
/* A pending INIT means we were reset mid-boot; log and (per the message)
 * issue a SOFT_RESET — the soft_reset call itself is not visible here. */
354 if (cpu_init_detectedx) {
355 print_apicid_nodeid_coreid(apicid, id,
356 "\n\n\nINIT detected from ");
357 printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
361 if (id.coreid == 0) {
362 if (!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
363 distinguish_cpu_resets(id.nodeid); // Also indicates we are started
365 // Mark the core as started.
366 lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
/* AP-only path from here: the BSP performs these steps elsewhere. */
368 if (apicid != bsp_apicid) {
369 /* Setup each AP's cores MSRs.
370 * This happens after HTinit.
371 * The BSP runs this code in it's own path.
373 update_microcode(cpuid_eax(1));
377 #if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
378 // Run on all AP for proper FID/VID setup.
379 if (id.coreid == 0) // only need set fid for core0
382 // check warm(bios) reset to call stage2 otherwise do stage1
383 if (warm_reset_detect(id.nodeid)) {
385 "init_fidvid_stage2 apicid: %02x\n",
387 init_fidvid_stage2(apicid, id.nodeid);
390 "init_fidvid_ap(stage1) apicid: %02x\n",
392 init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
398 /* AP is ready, Wait for the BSP to get memory configured */
399 /* FIXME: many cores spinning on node0 pci register seems to be bad.
400 * Why do we need to wait? These APs are just going to go sit in a hlt.
402 //wait_till_sysinfo_in_ram();
404 set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
408 "\nAP %02x should be halted but you are reading this....\n",
/* Check whether a node's core0 has signalled start: reads HT_INIT_CONTROL
 * from the node's function-0 PCI config space and masks HTIC_ColdR_Detect.
 * The return statement is not visible in this fragmented listing —
 * presumably returns the masked value (nonzero = started). */
415 static u32 is_core0_started(u32 nodeid)
419 device = NODE_PCI(nodeid, 0);
420 htic = pci_read_config32(device, HT_INIT_CONTROL);
421 htic &= HTIC_ColdR_Detect;
/* BSP-side barrier: spin until core0 of every non-BSP node reports started
 * (node 0 is the BSP's own node and is skipped). */
425 static void wait_all_core0_started(void)
427 /* When core0 is started, it will distingush_cpu_resets
428 * So wait for that to finish */
430 u32 nodes = get_nodes();
432 printk(BIOS_DEBUG, "core0 started: ");
433 for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
434 while (!is_core0_started(i)) {
436 printk(BIOS_DEBUG, " %02x", i);
438 printk(BIOS_DEBUG, "\n");
441 #if CONFIG_MAX_PHYSICAL_CPUS > 1
443 * void start_node(u32 node)
445 * start the core0 in node, so it can generate HT packet to feature code.
447 * This function starts the AP nodes core0s. wait_all_core0_started() in
448 * romstage.c waits for all the AP to be finished before continuing
/* Release an AP node's core0: set up a minimal DRAM base/limit (Fam10),
 * then allow the node to issue requests (ROM fetch) via F0x6c.  The bit
 * manipulation between the F0x6c read and write is not visible in this
 * fragmented listing. */
451 static void start_node(u8 node)
455 /* Enable routing table */
456 printk(BIOS_DEBUG, "Start node %02x", node);
458 #if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
459 /* For FAM10 support, we need to set Dram base/limit for the new node */
460 pci_write_config32(NODE_MP(node), 0x44, 0);
461 pci_write_config32(NODE_MP(node), 0x40, 3);
464 /* Allow APs to make requests (ROM fetch) */
465 val = pci_read_config32(NODE_HT(node), 0x6c);
467 pci_write_config32(NODE_HT(node), 0x6c, val);
469 printk(BIOS_DEBUG, " done.\n");
473 * static void setup_remote_node(u32 node)
475 * Copy the BSP Adress Map to each AP.
/* Replicate the BSP's (node 0's) address-map PCI registers — DRAM, MMIO and
 * IO base/limit pairs plus the configuration map — onto the given AP node,
 * so all nodes share one routing view. */
477 static void setup_remote_node(u8 node)
479 /* There registers can be used with F1x114_x Address Map at the
480 same time, So must set them even 32 node */
481 static const u16 pci_reg[] = {
482 /* DRAM Base/Limits Registers */
483 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
484 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
485 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
486 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
487 /* MMIO Base/Limits Registers */
488 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
489 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
490 /* IO Base/Limits Registers */
491 0xc4, 0xcc, 0xd4, 0xdc,
492 0xc0, 0xc8, 0xd0, 0xd8,
493 /* Configuration Map Registers */
494 0xe0, 0xe4, 0xe8, 0xec,
498 printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
500 /* copy the default resource map from node 0 */
501 for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
505 value = pci_read_config32(NODE_MP(0), reg);
506 pci_write_config32(NODE_MP(node), reg, value);
509 printk(BIOS_DEBUG, " done\n");
511 #endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
/* Erratum 281 workaround: adjust XCS tokens and request limits for ganged
 * links on server platforms, including the mixed-revision case where any
 * node is a pre-B0/B1 part.  NOTE(review): the masks applied between each
 * read/write pair (e.g. on F0x68, F3x6C, F3x7C, F3x144, F3x148+) are not
 * visible in this fragmented listing. */
513 static void AMD_Errata281(u8 node, u32 revision, u32 platform)
515 /* Workaround for Transaction Scheduling Conflict in
516 * Northbridge Cross Bar. Implement XCS Token adjustment
517 * for ganged links. Also, perform fix up for the mixed
524 u8 nodes = get_nodes();
526 if (platform & AMD_PTYPE_SVR) {
527 /* For each node we need to check for a "broken" node */
528 if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
529 for (i = 0; i < nodes; i++) {
530 if (mctGetLogicalCPUID(i) &
531 (AMD_DR_B0 | AMD_DR_B1)) {
/* Apply the register fixups when this node is B0/B1 or the system mixes
 * revisions. */
538 if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
540 /* F0X68[22:21] DsNpReqLmt0 = 01b */
541 val = pci_read_config32(NODE_PCI(node, 0), 0x68);
544 pci_write_config32(NODE_PCI(node, 0), 0x68, val);
547 val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
550 pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
553 val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
556 pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
558 /* F3X144[3:0] RspTok = 0001b */
559 val = pci_read_config32(NODE_PCI(node, 3), 0x144);
562 pci_write_config32(NODE_PCI(node, 3), 0x144, val);
/* Same adjustment for the three per-link token registers F3x148/14C/150. */
564 for (i = 0; i < 3; i++) {
565 reg = 0x148 + (i * 4);
566 val = pci_read_config32(NODE_PCI(node, 3), reg);
569 pci_write_config32(NODE_PCI(node, 3), reg, val);
/* Erratum 298 workaround (L2 eviction during A/D bit set): on B0/B1/B2
 * parts, set TlbCacheDis and TlbForceMemTypeUc in the relevant MSRs; on B3
 * (fixed silicon) only publish the OS Visible Workaround status so the OS
 * knows no software workaround is required.  NOTE(review): the rdmsr/wrmsr
 * lines surrounding fragments 596 and 600 are missing from this listing. */
575 static void AMD_Errata298(void)
577 /* Workaround for L2 Eviction May Occur during operation to
578 * set Accessed or dirty bit.
584 u8 nodes = get_nodes();
586 /* For each core we need to check for a "broken" node */
587 for (i = 0; i < nodes; i++) {
588 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
596 msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
600 msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
/* Advertise the workaround through the OSVW MSRs. */
603 msr = rdmsr(OSVW_ID_Length);
604 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
605 wrmsr(OSVW_ID_Length, msr);
607 msr = rdmsr(OSVW_Status);
608 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
609 wrmsr(OSVW_Status, msr);
/* B3 parts are unaffected: still set OSVW_ID_Length so the OS can query. */
612 if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
613 msr = rdmsr(OSVW_ID_Length);
614 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
615 wrmsr(OSVW_ID_Length, msr);
/* Map the build-time SYSTEM_TYPE to an AMD_PTYPE_* platform mask (desktop,
 * mobile or server); case labels and the return are not visible in this
 * fragmented listing. */
620 static u32 get_platform_type(void)
624 switch (SYSTEM_TYPE) {
626 ret |= AMD_PTYPE_DSK;
629 ret |= AMD_PTYPE_MOB;
632 ret |= AMD_PTYPE_SVR;
638 /* FIXME: add UMA support. */
640 /* All Fam10 are multi core */
/* On mobile/desktop platforms, program the PSI_L VID threshold (F3xA0) from
 * the lowest enabled P-state's VID, scanning P-states from P4 down to P0.
 * NOTE(review): the PCI accesses use NODE_PCI(i, 3) where i is the P-state
 * loop index, not the 'node' parameter — this looks like a bug (the 'node'
 * argument is otherwise unused in the visible lines); verify against the
 * full source and the BKDG. */
646 static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
652 if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
654 /* The following code sets the PSIVID to the lowest support P state
655 * assuming that the VID for the lowest power state is below
656 * the VDD voltage regulator threshold. (This also assumes that there
657 * is a Pstate lower than P0)
660 for (i = 4; i >= 0; i--) {
661 msr = rdmsr(PS_REG_BASE + i);
/* Only enabled P-states carry a valid VID. */
663 if (msr.hi & PS_EN_MASK) {
664 dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
666 dword |= (msr.lo >> 9) & 0x7F;
667 pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
675 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
676 * HT Phy operations are not valid on links that aren't present, so this
677 * prevents invalid accesses.
679 * Returns the offset of the link register.
/* Walk the function-0 PCI capability chain looking for the (cap_count+1)-th
 * HyperTransport Host capability; on success *offset receives its config
 * offset.  The decrement of cap_count and the TRUE/FALSE returns are not
 * visible in this fragmented listing. */
681 static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
686 /* get start of CPU HT Host Capabilities */
687 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
688 val &= 0xFF; //reg offset of first link
692 /* Traverse through the capabilities. */
694 reg = pci_read_config32(NODE_PCI(node, 0), val);
695 /* Is the capability block a HyperTransport capability block? */
696 if ((reg & 0xFF) == 0x08) {
697 /* Is the HT capability block an HT Host Capability? */
698 if ((reg & 0xE0000000) == (1 << 29))
703 val = (reg >> 8) & 0xFF; //update reg offset
704 } while (cap_count && val);
708 /* If requested capability found val != 0 */
716 * AMD_checkLinkType - Compare desired link characteristics using a logical
719 * Returns the link characteristic mask.
/* Classify one HT link of a node into an HTPHY_LINKTYPE_* mask:
 * coherent vs. noncoherent (link control at regoff+0x18), HT3 vs. HT1
 * (frequency field [11:8] of regoff+0x08 — above 6 means HT3), and
 * ganged vs. unganged (F0x170 + link*4).  The guard conditions between
 * the reads are not fully visible in this fragmented listing. */
721 static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
726 /* Check connect, init and coherency */
727 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
731 linktype |= HTPHY_LINKTYPE_COHERENT;
734 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
738 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
740 if (((val >> 8) & 0x0F) > 6)
741 linktype |= HTPHY_LINKTYPE_HT3;
743 linktype |= HTPHY_LINKTYPE_HT1;
746 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
749 linktype |= HTPHY_LINKTYPE_GANGED;
751 linktype |= HTPHY_LINKTYPE_UNGANGED;
757 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
758 * a phy setting for that link.
/* Read-modify-write one HT PHY register for the given link through the
 * function-4 portal (base F4x180 + link*8): select the phy register,
 * issue a portal read, poll for completion, apply mask/data from
 * fam10_htphy_default[entry], then issue a portal write and poll again. */
760 static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
766 /* Determine this link's portal */
770 phyBase = ((u32) link << 3) | 0x180;
772 /* Get the portal control register's initial value
773 * and update it to access the desired phy register
775 phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
/* Registers above 0x1FF require direct-map addressing. */
777 if (fam10_htphy_default[entry].htreg > 0x1FF) {
778 phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
779 phyReg |= HTPHY_DIRECT_MAP;
781 phyReg &= ~HTPHY_OFFSET_MASK;
784 /* Now get the current phy register data
785 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
787 phyReg |= fam10_htphy_default[entry].htreg;
788 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Poll until the portal signals the read is complete. */
791 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
792 } while (!(val & HTPHY_IS_COMPLETE_MASK));
794 /* Now we have the phy register data, apply the change */
795 val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
796 val &= ~fam10_htphy_default[entry].mask;
797 val |= fam10_htphy_default[entry].data;
798 pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
800 /* write it through the portal to the phy
801 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
803 phyReg |= HTPHY_WRITE_CMD;
804 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Poll until the portal signals the write is complete. */
807 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
808 } while (!(val & HTPHY_IS_COMPLETE_MASK));
/* Apply every fam10_msr_default[] entry matching this CPU's revision and
 * platform type: read-modify-write each MSR with the table's mask/data
 * pairs.  Runs on the current core only. */
811 void cpuSetAMDMSR(void)
813 /* This routine loads the CPU with default settings in fam10_msr_default
814 * table . It must be run after Cache-As-RAM has been enabled, and
815 * Hypertransport initialization has taken place. Also note
816 * that it is run on the current processor only, and only for the current
821 u32 revision, platform;
823 printk(BIOS_DEBUG, "cpuSetAMDMSR ");
825 revision = mctGetLogicalCPUID(0xFF);
826 platform = get_platform_type();
828 for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
829 if ((fam10_msr_default[i].revision & revision) &&
830 (fam10_msr_default[i].platform & platform)) {
831 msr = rdmsr(fam10_msr_default[i].msr);
832 msr.hi &= ~fam10_msr_default[i].mask_hi;
833 msr.hi |= fam10_msr_default[i].data_hi;
834 msr.lo &= ~fam10_msr_default[i].mask_lo;
835 msr.lo |= fam10_msr_default[i].data_lo;
836 wrmsr(fam10_msr_default[i].msr, msr);
841 printk(BIOS_DEBUG, " done\n");
/* Per-node PCI setup: set the PSI VID, apply matching fam10_pci_default[]
 * read-modify-write entries, program HT PHY registers for each present
 * host link via the portal helper, then apply erratum 281.  Runs once for
 * the first core of each node. */
844 static void cpuSetAMDPCI(u8 node)
846 /* This routine loads the CPU with default settings in fam10_pci_default
847 * table . It must be run after Cache-As-RAM has been enabled, and
848 * Hypertransport initialization has taken place. Also note
849 * that it is run for the first core on each node
852 u32 revision, platform;
856 printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
858 revision = mctGetLogicalCPUID(node);
859 platform = get_platform_type();
861 AMD_SetupPSIVID_d(platform, node); /* Set PSIVID offset which is not table driven */
863 for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
864 if ((fam10_pci_default[i].revision & revision) &&
865 (fam10_pci_default[i].platform & platform)) {
866 val = pci_read_config32(NODE_PCI(node,
867 fam10_pci_default[i].
869 fam10_pci_default[i].offset);
870 val &= ~fam10_pci_default[i].mask;
871 val |= fam10_pci_default[i].data;
872 pci_write_config32(NODE_PCI(node,
873 fam10_pci_default[i].
875 fam10_pci_default[i].offset, val);
879 for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
880 if ((fam10_htphy_default[i].revision & revision) &&
881 (fam10_htphy_default[i].platform & platform)) {
882 /* HT Phy settings either apply to both sublinks or have
883 * separate registers for sublink zero and one, so there
884 * will be two table entries. So, here we only loop
885 * through the sublink zeros in function zero.
887 for (j = 0; j < 4; j++) {
888 if (AMD_CpuFindCapability(node, j, &offset)) {
889 if (AMD_checkLinkType(node, j, offset)
890 & fam10_htphy_default[i].linktype) {
891 AMD_SetHtPhyRegister(node, j,
895 /* No more capabilities,
904 /* FIXME: add UMA support and programXbarToSriReg(); */
906 AMD_Errata281(node, revision, platform);
908 /* FIXME: if the dct phy doesn't init correct it needs to reset.
909 if (revision & (AMD_DR_B2 | AMD_DR_B3))
912 printk(BIOS_DEBUG, " done\n");
/* Clear the Machine Check Architecture status banks on the running core
 * (cold boot only — they hold valid data across a warm reset).  Guarded on
 * CPUID.1:EDX MCE (bit 7) and MCA (bit 14); bank count comes from
 * MCG_CAP[7:0].  NOTE(review): the bank-count extraction, the inner loop
 * over banks, and the wrmsr that zeroes each status register are not
 * visible in this fragmented listing. */
915 static void cpuInitializeMCA(void)
917 /* Clears Machine Check Architecture (MCA) registers, which power on
918 * containing unknown data, on currently running processor.
919 * This routine should only be executed on initial power on (cold boot),
920 * not across a warm reset because valid data is present at that time.
927 if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
928 msr = rdmsr(MCG_CAP);
929 if (msr.lo & MCG_CTL_P) { /* MCG_CTL_P bit is set? */
932 msr.lo <<= 2; /* multiply the count by 4 */
933 reg = MC0_STA + msr.lo;
935 for (i = 0; i < 4; i++) {
937 reg -= 4; /* Touch status regs for each bank */
944 * finalize_node_setup()
946 * Do any additional post HT init
949 static void finalize_node_setup(struct sys_info *sysinfo)
952 u8 nodes = get_nodes();
955 #if RAMINIT_SYSINFO == 1
956 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
957 reg = pci_read_config32(NODE_HT(0), 0x64);
958 sysinfo->sblk = (reg >> 8) & 7;
960 sysinfo->nodes = nodes;
961 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
964 for (i = 0; i < nodes; i++) {
969 // Prep each node for FID/VID setup.
973 #if CONFIG_MAX_PHYSICAL_CPUS > 1
974 /* Skip the BSP, start at node 1 */
975 for (i = 1; i < nodes; i++) {
976 setup_remote_node(i);