2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <cpu/x86/lapic.h>
23 #include <cpu/x86/mtrr.h>
24 #include <northbridge/amd/amdfam10/amdfam10.h>
25 #include <northbridge/amd/amdht/AsPsDefs.h>
26 #include <northbridge/amd/amdht/porting.h>
28 #include <cpu/x86/mtrr/earlymtrr.c>
29 #include <northbridge/amd/amdfam10/raminit_amdmct.c>
31 //it takes the CONFIG_ENABLE_APIC_EXT_ID and CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID
36 #ifndef SET_FIDVID_CORE0_ONLY
37 /* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
38 Need to do every AP to set common FID/VID */
39 #define SET_FIDVID_CORE0_ONLY 0
42 static void prep_fid_change(void);
43 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
44 void cpuSetAMDMSR(void);
46 #if CONFIG_PCI_IO_CFG_EXT == 1
/* Enable extended (4KB) PCI configuration access through CF8/CFC by setting
 * NB_CFG MSR bit 46 (EnableCf8ExtCfg). Run on core0 only — NB_CFG is shared
 * between cores (see caller in init_cpus).
 * NOTE(review): the function's opening brace, the msr declaration, and the
 * #else/#endif lines are elided from this excerpt. */
47 static void set_EnableCf8ExtCfg(void)
49 // set the NB_CFG[46]=1;
51 msr = rdmsr(NB_CFG_MSR);
52 // EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
/* Bit 46 lives in the high dword, hence (46 - 32). */
53 msr.hi |= (1 << (46 - 32));
54 wrmsr(NB_CFG_MSR, msr);
/* Stub used when CONFIG_PCI_IO_CFG_EXT != 1. */
57 static void set_EnableCf8ExtCfg(void) { }
/* Callback type invoked once per selected AP with its APIC ID and an opaque
 * caller argument. */
61 typedef void (*process_ap_t) (u32 apicid, void *gp);
//core_range = 0 : all cores
//core_range = 1 : core 0 only
//core_range = 2 : cores other than core0
/* Iterate over the application processors (skipping the BSP itself) and call
 * process_ap(ap_apicid, gp) for each one selected by core_range.
 * The AP APIC ID is derived from node index i and core index j; the layout
 * depends on NB_CFG bit 54 (see read_nb_cfg_54 below).
 * NOTE(review): several interior lines (declarations, jstart/jend setup,
 * closing braces) are elided from this excerpt. */
67 static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
// Assumes the OS/firmware has not changed our APIC IDs since startup.
81 /* get_nodes is defined in ht_wrapper.c */
/* Sibling cores are disabled either by Kconfig or by the CMOS option. */
84 if (!CONFIG_LOGICAL_CPUS ||
85 read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 means multi core
91 /* Assume that all nodes are the same stepping, otherwise we could use
92 nb_cfg_54 from the BSP for all nodes */
93 nb_cfg_54 = read_nb_cfg_54();
/* CPUID 8000_0008h ECX[15:12] = ApicIdCoreIdSize; 0 means use the default. */
95 ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
96 if (ApicIdCoreIdSize) {
97 siblings = ((1 << ApicIdCoreIdSize) - 1);
99 siblings = 3; //quad core
102 for (i = 0; i < nodes; i++) {
103 cores_found = get_core_num_in_bsp(i);
107 if (core_range == 2) {
113 if (disable_siblings || (core_range == 1)) {
119 for (j = jstart; j <= jend; j++) {
/* APIC ID layout: with NB_CFG[54] set, cores of a node are contiguous
 * (node * (siblings+1) + core); otherwise cores are strided by 64. */
121 i * (nb_cfg_54 ? (siblings + 1) : 1) +
122 j * (nb_cfg_54 ? 1 : 64);
124 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
125 #if CONFIG_LIFT_BSP_APIC_ID == 0
126 if ((i != 0) || (j != 0)) /* except bsp */
128 ap_apicid += CONFIG_APIC_ID_OFFSET;
/* Never run the callback on the BSP itself. */
131 if (ap_apicid == bsp_apicid)
134 process_ap(ap_apicid, gp);
/* Remotely read a local-APIC register of another CPU identified by apicid,
 * using the REMRD (remote read) ICR delivery mode. On success the value is
 * stored in *pvalue. Return value semantics are established by elided lines —
 * presumably 0 on success, nonzero on timeout/invalid; TODO confirm.
 * NOTE(review): the do { openers and timeout initialisation are elided. */
140 static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
145 lapic_wait_icr_idle();
146 lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
147 lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
149 /* Extra busy check compared to lapic.h */
152 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
153 } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
/* Then poll until the remote-read completes or times out. */
157 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
158 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
162 if (status == LAPIC_ICR_RR_VALID) {
163 *pvalue = lapic_read(LAPIC_RRR);
169 /* Use the LAPIC timer count register to hold each core's init status */
170 #define LAPIC_MSG_REG 0x380
173 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
/* Debug helper: print a core's APIC ID, node ID and core ID, prefixed with
 * the caller-supplied string str. */
176 static inline __attribute__ ((always_inline))
177 void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
181 "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
182 apicid, id.nodeid, id.coreid);
/* Poll the target CPU's LAPIC_MSG_REG (via remote read) until its low 6 bits
 * equal the requested state. The loop header, timeout handling and return
 * value are elided from this excerpt — presumably returns a timeout
 * indicator; TODO confirm against the full file. */
185 static u32 wait_cpu_state(u32 apicid, u32 state)
191 if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
/* Only the low 6 bits carry the state (see allow_all_aps_stop). */
193 if ((readback & 0x3f) == state) {
195 break; //target cpu reached the requested stage
/* for_each_ap callback: wait for one AP to report the "started" state (0x13)
 * in its LAPIC_MSG_REG and log the result. gp is unused here. */
207 static void wait_ap_started(u32 ap_apicid, void *gp)
210 timeout = wait_cpu_state(ap_apicid, 0x13); // started
211 printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
/* NOTE(review): the branch on timeout is elided between these printk lines. */
213 printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
215 printk(BIOS_DEBUG, "started\n");
/* BSP helper: block until every AP core other than the core0s (core_range 2)
 * has reported the started state. */
219 void wait_all_other_cores_started(u32 bsp_apicid)
221 // all aps other than core0
222 printk(BIOS_DEBUG, "started ap apicid: ");
223 for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
224 printk(BIOS_DEBUG, "\n");
/* Broadcast permission for the APs to stop by writing state 0x14 (with the
 * BSP APIC ID in the high byte) into the shared LAPIC_MSG_REG mailbox. */
227 void allow_all_aps_stop(u32 bsp_apicid)
229 /* Called by the BSP to indicate APs can stop */
231 /* FIXME Do APs use this? */
233 // allow aps to stop; use 6 bits for state
234 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
/* Enable extended APIC ID support on the given node by setting the
 * HT Transaction Control (F0x68) extended-APIC bits. */
237 static void enable_apic_ext_id(u32 node)
241 val = pci_read_config32(NODE_HT(node), 0x68);
242 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
243 pci_write_config32(NODE_HT(node), 0x68, val);
/* Tear down Cache-As-RAM on an AP and halt it: undo the CAR-only BU_CFG2
 * L2-to-L3 tweak, disable CAR, then stop the core (stop call elided).
 * Only the BSP (node0/core0) continues past romstage. */
246 static void STOP_CAR_AND_CPU(void)
250 /* Disable L2 IC to L3 connection (Only for CAR) */
251 msr = rdmsr(BU_CFG2);
252 msr.lo &= ~(1 << ClLinesToNbDis);
/* NOTE(review): the wrmsr writing BU_CFG2 back appears to be elided here. */
255 disable_cache_as_ram(); // inline
256 /* stop all cores except node0/core0 the bsp .... */
260 #if CONFIG_RAMINIT_SYSINFO
/* Early per-core CPU initialisation, run by every core (BSP and APs) in
 * romstage. Determines node/core IDs, optionally lifts APIC IDs, handles
 * INIT-triggered restarts, marks the core started in the LAPIC mailbox, and
 * on APs runs microcode update and FID/VID setup before the AP parks itself.
 * Returns the BSP APIC ID (return statement elided from this excerpt).
 * NOTE(review): many interior lines (declarations, #else/#endif, soft_reset
 * call, STOP_CAR_AND_CPU call) are elided. */
261 static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
263 static u32 init_cpus(u32 cpu_init_detectedx)
268 struct node_core_id id;
271 * already set early mtrr in cache_as_ram.inc
274 /* that is from initial apicid, we need nodeid and coreid
276 id = get_node_core_id_x();
278 /* NB_CFG MSR is shared between cores, so we need to make sure
279 core0 is done first --- use wait_all_core0_started */
280 if (id.coreid == 0) {
281 set_apicid_cpuid_lo(); /* only set it on core0 */
282 set_EnableCf8ExtCfg(); /* only set it on core0 */
283 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
284 enable_apic_ext_id(id.nodeid);
/* Optionally lift the local APIC ID by CONFIG_APIC_ID_OFFSET (the BSP is
 * lifted only when CONFIG_LIFT_BSP_APIC_ID is set). */
290 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
291 u32 initial_apicid = get_initial_apicid();
293 #if CONFIG_LIFT_BSP_APIC_ID == 0
294 if (initial_apicid != 0) // other than bsp
297 /* use initial apic id to lift it */
298 u32 dword = lapic_read(LAPIC_ID);
/* APIC ID occupies bits [31:24] of the LAPIC ID register. */
299 dword &= ~(0xff << 24);
301 (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
303 lapic_write(LAPIC_ID, dword);
305 #if CONFIG_LIFT_BSP_APIC_ID == 1
306 bsp_apicid += CONFIG_APIC_ID_OFFSET;
311 /* get the apicid, it may be lifted already */
314 // show our apicid, nodeid, and coreid
315 if (id.coreid == 0) {
316 if (id.nodeid != 0) //all core0 except bsp
317 print_apicid_nodeid_coreid(apicid, id, " core0: ");
318 } else { //all other cores
319 print_apicid_nodeid_coreid(apicid, id, " corex: ");
/* An INIT signal was detected: log it and soft-reset (reset call elided). */
322 if (cpu_init_detectedx) {
323 print_apicid_nodeid_coreid(apicid, id,
324 "\n\n\nINIT detected from ");
325 printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
329 if (id.coreid == 0) {
330 if (!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
331 distinguish_cpu_resets(id.nodeid); // Also indicates we are started
333 // Mark the core as started.
334 lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
/* AP-only path from here: the BSP returns and continues romstage. */
336 if (apicid != bsp_apicid) {
337 /* Setup each AP's cores MSRs.
338 * This happens after HTinit.
339 * The BSP runs this code in its own path.
341 update_microcode(cpuid_eax(1));
345 #if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
346 // Run on all AP for proper FID/VID setup.
347 if (id.coreid == 0) // only need set fid for core0
350 // check warm(bios) reset to call stage2 otherwise do stage1
351 if (warm_reset_detect(id.nodeid)) {
353 "init_fidvid_stage2 apicid: %02x\n",
355 init_fidvid_stage2(apicid, id.nodeid);
358 "init_fidvid_ap(stage1) apicid: %02x\n",
360 init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
366 /* AP is ready, configure MTRRs and go to sleep */
367 set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
/* Reached only if STOP_CAR_AND_CPU (elided) failed to halt the AP. */
372 "\nAP %02x should be halted but you are reading this....\n",
/* Check whether core0 of the given node has started, by testing the
 * ColdReset-detect bit in that node's HT_INIT_CONTROL register (F0x6C area).
 * Returns nonzero when started (return statement elided from excerpt). */
379 static u32 is_core0_started(u32 nodeid)
383 device = NODE_PCI(nodeid, 0);
384 htic = pci_read_config32(device, HT_INIT_CONTROL);
385 htic &= HTIC_ColdR_Detect;
/* BSP helper: busy-wait until core0 of every non-BSP node reports started. */
389 void wait_all_core0_started(void)
391 /* When core0 is started, it will call distinguish_cpu_resets
392 * So wait for that to finish */
394 u32 nodes = get_nodes();
396 printk(BIOS_DEBUG, "core0 started: ");
397 for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
398 while (!is_core0_started(i)) {
400 printk(BIOS_DEBUG, " %02x", i);
402 printk(BIOS_DEBUG, "\n");
405 #if CONFIG_MAX_PHYSICAL_CPUS > 1
407 * void start_node(u32 node)
409 * start the core0 in node, so it can generate HT packet to feature code.
411 * This function starts the AP nodes' core0s. wait_all_core0_started() in
412 * romstage.c waits for all the APs to be finished before continuing
/* Release core0 of an AP node: set up minimal DRAM base/limit so the node can
 * respond, then clear the request-disable so it may fetch from ROM. */
415 static void start_node(u8 node)
419 /* Enable routing table */
420 printk(BIOS_DEBUG, "Start node %02x", node);
422 #if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
423 /* For FAM10 support, we need to set Dram base/limit for the new node */
424 pci_write_config32(NODE_MP(node), 0x44, 0);
425 pci_write_config32(NODE_MP(node), 0x40, 3);
428 /* Allow APs to make requests (ROM fetch) */
429 val = pci_read_config32(NODE_HT(node), 0x6c);
/* NOTE(review): the line clearing the request-disable bit in val is elided. */
431 pci_write_config32(NODE_HT(node), 0x6c, val);
433 printk(BIOS_DEBUG, " done.\n");
437 * static void setup_remote_node(u32 node)
439 * Copy the BSP Address Map to each AP.
/* Replicate node 0's DRAM/MMIO/IO/config-map resource registers (function 1)
 * onto the given remote node so all nodes share the same address map. */
441 static void setup_remote_node(u8 node)
443 /* These registers can be used with F1x114_x Address Map at the
444 same time, so we must set them even for 32 nodes */
445 static const u16 pci_reg[] = {
446 /* DRAM Base/Limits Registers */
447 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
448 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
449 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
450 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
451 /* MMIO Base/Limits Registers */
452 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
453 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
454 /* IO Base/Limits Registers */
455 0xc4, 0xcc, 0xd4, 0xdc,
456 0xc0, 0xc8, 0xd0, 0xd8,
457 /* Configuration Map Registers */
458 0xe0, 0xe4, 0xe8, 0xec,
462 printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
464 /* copy the default resource map from node 0 */
465 for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
469 value = pci_read_config32(NODE_MP(0), reg);
470 pci_write_config32(NODE_MP(node), reg, value);
473 printk(BIOS_DEBUG, " done\n");
475 #endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
/* AMD Fam10h erratum 281 workaround: adjust northbridge crossbar (XCS) token
 * counts on server platforms containing rev B0/B1 parts (or a mix of B0/B1
 * with later revisions). Applies only when the platform type includes SVR.
 * NOTE(review): the read-modify-write mask/value lines between each
 * pci_read/pci_write pair are elided from this excerpt. */
477 static void AMD_Errata281(u8 node, u32 revision, u32 platform)
479 /* Workaround for Transaction Scheduling Conflict in
480 * Northbridge Cross Bar. Implement XCS Token adjustment
481 * for ganged links. Also, perform fix up for the mixed
488 u8 nodes = get_nodes();
490 if (platform & AMD_PTYPE_SVR) {
491 /* For each node we need to check for a "broken" node */
492 if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
493 for (i = 0; i < nodes; i++) {
494 if (mctGetLogicalCPUID(i) &
495 (AMD_DR_B0 | AMD_DR_B1)) {
/* Apply the fix on affected revisions, or on all nodes in a mixed system. */
502 if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
504 /* F0X68[22:21] DsNpReqLmt0 = 01b */
505 val = pci_read_config32(NODE_PCI(node, 0), 0x68);
508 pci_write_config32(NODE_PCI(node, 0), 0x68, val);
511 val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
514 pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
517 val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
520 pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
522 /* F3X144[3:0] RspTok = 0001b */
523 val = pci_read_config32(NODE_PCI(node, 3), 0x144);
526 pci_write_config32(NODE_PCI(node, 3), 0x144, val);
/* Fix up the three XCS buffer-count registers F3x148..F3x150. */
528 for (i = 0; i < 3; i++) {
529 reg = 0x148 + (i * 4);
530 val = pci_read_config32(NODE_PCI(node, 3), reg);
533 pci_write_config32(NODE_PCI(node, 3), reg, val);
/* AMD Fam10h erratum 298 workaround ("L2 eviction may occur during operation
 * to set Accessed or Dirty bit"): on affected revisions (B0/B1/B2) set the
 * TLB MSR workaround bits, and advertise the fix to the OS via the OSVW
 * (OS Visible Workaround) MSRs. B3 parts are fixed in hardware, so only the
 * OSVW length is updated for them.
 * NOTE(review): the rdmsr/wrmsr lines surrounding the bit-set lines, and the
 * affectedRev bookkeeping, are elided from this excerpt. */
539 static void AMD_Errata298(void)
541 /* Workaround for L2 Eviction May Occur during operation to
542 * set Accessed or dirty bit.
548 u8 nodes = get_nodes();
550 /* For each core we need to check for a "broken" node */
551 for (i = 0; i < nodes; i++) {
552 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
560 msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
564 msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
/* Tell the OS (via OSVW) that the workaround is applied. */
567 msr = rdmsr(OSVW_ID_Length);
568 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
569 wrmsr(OSVW_ID_Length, msr);
571 msr = rdmsr(OSVW_Status);
572 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
573 wrmsr(OSVW_Status, msr);
/* B3 silicon has the hardware fix: expose the OSVW entry without status. */
576 if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
577 msr = rdmsr(OSVW_ID_Length);
578 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
579 wrmsr(OSVW_ID_Length, msr);
/* Map the build-time SYSTEM_TYPE to the AMD_PTYPE_* platform flag bitmask
 * used to filter the MSR/PCI/HT-phy default tables. The case labels, the
 * multi-core flag OR, and the return are elided from this excerpt. */
584 static u32 get_platform_type(void)
588 switch (SYSTEM_TYPE) {
590 ret |= AMD_PTYPE_DSK;
593 ret |= AMD_PTYPE_MOB;
596 ret |= AMD_PTYPE_SVR;
602 /* FIXME: add UMA support. */
604 /* All Fam10 are multi core */
/* Program the PSI_L (power status indicator) VID threshold (F3xA0) from the
 * lowest enabled P-state's VID, on mobile/desktop platforms only. */
610 static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
616 if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
618 /* The following code sets the PSIVID to the lowest supported P state
619 * assuming that the VID for the lowest power state is below
620 * the VDD voltage regulator threshold. (This also assumes that there
621 * is a Pstate lower than P0)
/* Scan P-states from P4 down to P0 for the first enabled one. */
624 for (i = 4; i >= 0; i--) {
625 msr = rdmsr(PS_REG_BASE + i);
627 if (msr.hi & PS_EN_MASK) {
/* NOTE(review): the P-state index i is used as the NODE_PCI node argument
 * here rather than the 'node' parameter — looks suspicious; verify against
 * the full file / BKDG before changing. */
628 dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
/* CpuVid lives at bits [15:9] of the P-state MSR low dword. */
630 dword |= (msr.lo >> 9) & 0x7F;
631 pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
639 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
640 * HT Phy operations are not valid on links that aren't present, so this
641 * prevents invalid accesses.
643 * Returns the offset of the link register.
/* Walk F0's PCI capability chain to the cap_count-th HT Host capability;
 * on success store its config-space offset in *offset. The BOOL return and
 * cap_count decrement are established by elided lines. */
645 static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
650 /* get start of CPU HT Host Capabilities */
651 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
652 val &= 0xFF; //reg offset of first link
656 /* Traverse through the capabilities. */
658 reg = pci_read_config32(NODE_PCI(node, 0), val);
659 /* Is the capability block a HyperTransport capability block? */
660 if ((reg & 0xFF) == 0x08) {
661 /* Is the HT capability block an HT Host Capability? */
662 if ((reg & 0xE0000000) == (1 << 29))
/* Next-capability pointer is in bits [15:8]; 0 terminates the chain. */
667 val = (reg >> 8) & 0xFF; //update reg offset
668 } while (cap_count && val);
672 /* If requested capability found val != 0 */
680 * AMD_checkLinkType - Compare desired link characteristics using a logical
683 * Returns the link characteristic mask.
/* Build the HTPHY_LINKTYPE_* mask for one HT link from the HT Host capability
 * at regoff: coherent vs. noncoherent, HT3 vs. HT1 (from link frequency),
 * and ganged vs. unganged (from F0x170+4*link). Several condition lines are
 * elided from this excerpt. */
685 static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
690 /* Check connect, init and coherency */
691 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
695 linktype |= HTPHY_LINKTYPE_COHERENT;
698 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
/* Link Frequency field: values above 6 indicate HT3 speeds. */
702 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
704 if (((val >> 8) & 0x0F) > 6)
705 linktype |= HTPHY_LINKTYPE_HT3;
707 linktype |= HTPHY_LINKTYPE_HT1;
710 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
713 linktype |= HTPHY_LINKTYPE_GANGED;
715 linktype |= HTPHY_LINKTYPE_UNGANGED;
721 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
722 * a phy setting for that link.
/* Apply fam10_htphy_default[entry] to the phy of the given link via the
 * F4 link-phy portal: read-modify-write the phy data register, then issue a
 * portal write and poll completion. do { openers are elided. */
724 static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
730 /* Determine this link's portal */
/* Portal control registers start at F4x180, 8 bytes apart per link. */
734 phyBase = ((u32) link << 3) | 0x180;
736 /* Get the portal control register's initial value
737 * and update it to access the desired phy register
739 phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
/* Registers above 0x1FF require direct-map addressing. */
741 if (fam10_htphy_default[entry].htreg > 0x1FF) {
742 phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
743 phyReg |= HTPHY_DIRECT_MAP;
745 phyReg &= ~HTPHY_OFFSET_MASK;
748 /* Now get the current phy register data
749 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
751 phyReg |= fam10_htphy_default[entry].htreg;
752 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Poll LinkPhyDone until the portal read completes. */
755 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
756 } while (!(val & HTPHY_IS_COMPLETE_MASK));
758 /* Now we have the phy register data, apply the change */
759 val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
760 val &= ~fam10_htphy_default[entry].mask;
761 val |= fam10_htphy_default[entry].data;
762 pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
764 /* write it through the portal to the phy
765 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
767 phyReg |= HTPHY_WRITE_CMD;
768 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Poll LinkPhyDone again until the portal write completes. */
771 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
772 } while (!(val & HTPHY_IS_COMPLETE_MASK));
/* Apply the fam10_msr_default table to the current core: for each entry
 * matching this CPU's revision and platform type, read-modify-write the MSR
 * with the table's mask/data. Runs on every core (not just core0).
 * AMD_Errata298() is presumably called from the elided tail of this function;
 * confirm against the full file. */
775 void cpuSetAMDMSR(void)
777 /* This routine loads the CPU with default settings in fam10_msr_default
778 * table . It must be run after Cache-As-RAM has been enabled, and
779 * Hypertransport initialization has taken place. Also note
780 * that it is run on the current processor only, and only for the current
785 u32 revision, platform;
787 printk(BIOS_DEBUG, "cpuSetAMDMSR ");
/* 0xFF selects the current (local) processor's logical CPUID. */
789 revision = mctGetLogicalCPUID(0xFF);
790 platform = get_platform_type();
792 for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
793 if ((fam10_msr_default[i].revision & revision) &&
794 (fam10_msr_default[i].platform & platform)) {
795 msr = rdmsr(fam10_msr_default[i].msr);
796 msr.hi &= ~fam10_msr_default[i].mask_hi;
797 msr.hi |= fam10_msr_default[i].data_hi;
798 msr.lo &= ~fam10_msr_default[i].mask_lo;
799 msr.lo |= fam10_msr_default[i].data_lo;
800 wrmsr(fam10_msr_default[i].msr, msr);
805 printk(BIOS_DEBUG, " done\n");
/* Apply per-node PCI config defaults: PSIVID setup, the fam10_pci_default
 * table (revision/platform filtered read-modify-write of NODE_PCI regs),
 * the fam10_htphy_default table (per present HT host link), and the
 * erratum-281 workaround. Run for the first core of each node. */
808 static void cpuSetAMDPCI(u8 node)
810 /* This routine loads the CPU with default settings in fam10_pci_default
811 * table . It must be run after Cache-As-RAM has been enabled, and
812 * Hypertransport initialization has taken place. Also note
813 * that it is run for the first core on each node
816 u32 revision, platform;
820 printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
822 revision = mctGetLogicalCPUID(node);
823 platform = get_platform_type();
825 AMD_SetupPSIVID_d(platform, node); /* Set PSIVID offset which is not table driven */
826 /* Table-driven PCI config defaults (function number line elided below). */
827 for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
828 if ((fam10_pci_default[i].revision & revision) &&
829 (fam10_pci_default[i].platform & platform)) {
830 val = pci_read_config32(NODE_PCI(node,
831 fam10_pci_default[i].
833 fam10_pci_default[i].offset);
834 val &= ~fam10_pci_default[i].mask;
835 val |= fam10_pci_default[i].data;
836 pci_write_config32(NODE_PCI(node,
837 fam10_pci_default[i].
839 fam10_pci_default[i].offset, val);
843 for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
844 if ((fam10_htphy_default[i].revision & revision) &&
845 (fam10_htphy_default[i].platform & platform)) {
846 /* HT Phy settings either apply to both sublinks or have
847 * separate registers for sublink zero and one, so there
848 * will be two table entries. So, here we only loop
849 * through the sublink zeros in function zero.
851 for (j = 0; j < 4; j++) {
852 if (AMD_CpuFindCapability(node, j, &offset)) {
853 if (AMD_checkLinkType(node, j, offset)
854 & fam10_htphy_default[i].linktype) {
855 AMD_SetHtPhyRegister(node, j,
859 /* No more capabilities,
868 /* FIXME: add UMA support and programXbarToSriReg(); */
870 AMD_Errata281(node, revision, platform);
872 /* FIXME: if the dct phy doesn't init correct it needs to reset.
873 if (revision & (AMD_DR_B2 | AMD_DR_B3))
876 printk(BIOS_DEBUG, " done\n");
/* Clear the Machine Check Architecture status registers on the running core.
 * Cold-boot only — after a warm reset the MCA registers hold valid data. */
880 static void cpuInitializeMCA(void)
882 /* Clears Machine Check Architecture (MCA) registers, which power on
883 * containing unknown data, on currently running processor.
884 * This routine should only be executed on initial power on (cold boot),
885 * not across a warm reset because valid data is present at that time.
/* CPUID 1 EDX bit 7 = MCE, bit 14 = MCA; both must be supported. */
892 if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
893 msr = rdmsr(MCG_CAP);
894 if (msr.lo & MCG_CTL_P) { /* MCG_CTL_P bit is set? */
/* NOTE(review): masking msr.lo down to the bank count (MCG_CAP[7:0])
 * appears in an elided line before this shift. */
897 msr.lo <<= 2; /* multiply the count by 4 */
898 reg = MC0_STA + msr.lo;
900 for (i = 0; i < 4; i++) {
902 reg -= 4; /* Touch status regs for each bank */
910 * finalize_node_setup()
912 * Do any additional post HT init
915 static void finalize_node_setup(struct sys_info *sysinfo)
918 u8 nodes = get_nodes();
921 #if CONFIG_RAMINIT_SYSINFO
922 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
923 reg = pci_read_config32(NODE_HT(0), 0x64);
924 sysinfo->sblk = (reg >> 8) & 7;
926 sysinfo->nodes = nodes;
927 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
930 for (i = 0; i < nodes; i++) {
935 // Prep each node for FID/VID setup.
939 #if CONFIG_MAX_PHYSICAL_CPUS > 1
940 /* Skip the BSP, start at node 1 */
941 for (i = 1; i < nodes; i++) {
942 setup_remote_node(i);