2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <cpu/x86/lapic.h>
23 #include <cpu/x86/mtrr.h>
24 #include <northbridge/amd/amdfam10/amdfam10.h>
25 #include <northbridge/amd/amdht/AsPsDefs.h>
26 #include <northbridge/amd/amdht/porting.h>
28 #include <cpu/x86/mtrr/earlymtrr.c>
29 #include <northbridge/amd/amdfam10/raminit_amdmct.c>
31 //it takes the CONFIG_ENABLE_APIC_EXT_ID and CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID
36 #ifndef SET_FIDVID_CORE0_ONLY
37 /* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
38 Need to do every AP to set common FID/VID */
39 #define SET_FIDVID_CORE0_ONLY 0
42 static void prep_fid_change(void);
43 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
44 void cpuSetAMDMSR(void);
/* Enable CF8/CFC access to the extended (4K) PCI configuration space by
 * setting NB_CFG MSR bit 46 (EnableCf8ExtCfg).  Compiled as an empty
 * stub when CONFIG_PCI_IO_CFG_EXT is disabled. */
46 #if CONFIG_PCI_IO_CFG_EXT == 1
47 static void set_EnableCf8ExtCfg(void)
49 // set the NB_CFG[46]=1;
51 msr = rdmsr(NB_CFG_MSR);
52 // EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
53 msr.hi |= (1 << (46 - 32)); /* bit 46 lives in the high dword */
54 wrmsr(NB_CFG_MSR, msr);
57 static void set_EnableCf8ExtCfg(void) { }
61 #define _ULLx(x) x ## ULL
62 #define _ULL(x) _ULLx(x)
65 #define PCI_MMIO_BASE _ULL(CONFIG_MMCONF_BASE_ADDRESS)
/* Program MSR 0xC0010058 (MMIO Config Base Address Register) so PCI
 * config space can be reached through MMCONF: base comes from
 * CONFIG_MMCONF_BASE_ADDRESS, bus-range field is set for 256 buses
 * (256MB window), and bit 0 enables the mechanism.  Does nothing
 * unless CONFIG_MMCONF_SUPPORT is enabled. */
67 static void set_pci_mmio_conf_reg(void)
69 #if CONFIG_MMCONF_SUPPORT
70 # if PCI_MMIO_BASE > 0xffffffff
71 # error CONFIG_MMCONF_BASE_ADDRESS must currently fit in 32 bits!
74 msr = rdmsr(0xc0010058);
75 msr.lo &= ~(0xfff00000 | (0xf << 2)); /* clear base[31:20] and BusRange */
76 // 256 buses, one segment. Total 256M address space.
77 msr.lo |= (PCI_MMIO_BASE & 0xfff00000) | (8 << 2) | (1 << 0); /* bit 0 = enable */
78 msr.hi &= ~(0x0000ffff);
79 msr.hi |= (PCI_MMIO_BASE >> (32));
81 wrmsr(0xc0010058, msr); // MMIO Config Base Address Reg
85 typedef void (*process_ap_t) (u32 apicid, void *gp);
87 //core_range = 0 : all cores
88 //core range = 1 : core 0 only
89 //core range = 2 : cores other than core0
/* Iterate over application processors and call process_ap(apicid, gp)
 * for each one selected by core_range (0 = all cores, 1 = core0 only,
 * 2 = all cores except core0 -- see comments above).  The AP apicid is
 * derived from node/core indices using nb_cfg_54, optionally shifted by
 * CONFIG_APIC_ID_OFFSET when APIC extended ids are enabled.  The BSP
 * itself is always skipped. */
91 static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
94 // here assume the OS don't change our apicid
103 u32 ApicIdCoreIdSize;
105 /* get_nodes define in ht_wrapper.c */
108 if (!CONFIG_LOGICAL_CPUS ||
109 read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 means multi core
110 disable_siblings = 1;
112 disable_siblings = 0;
115 /* Assume that all nodes are the same stepping, otherwise we can use
116 nb_cfg_54 from bsp for all nodes */
117 nb_cfg_54 = read_nb_cfg_54();
119 ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
120 if (ApicIdCoreIdSize) {
121 siblings = ((1 << ApicIdCoreIdSize) - 1);
123 siblings = 3; //quad core
126 for (i = 0; i < nodes; i++) {
127 cores_found = get_core_num_in_bsp(i);
131 if (core_range == 2) {
137 if (disable_siblings || (core_range == 1)) {
143 for (j = jstart; j <= jend; j++) {
/* apicid layout depends on NB_CFG[54]: either node*(cores+1)+core
 * or node + core*64 */
145 i * (nb_cfg_54 ? (siblings + 1) : 1) +
146 j * (nb_cfg_54 ? 1 : 64);
148 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
149 #if CONFIG_LIFT_BSP_APIC_ID == 0
150 if ((i != 0) || (j != 0)) /* except bsp */
152 ap_apicid += CONFIG_APIC_ID_OFFSET;
155 if (ap_apicid == bsp_apicid)
158 process_ap(ap_apicid, gp);
/* Read a register of a remote core's LAPIC using the REMRD (remote
 * read) inter-processor message.  The result is returned through
 * *pvalue when the remote-read status reaches LAPIC_ICR_RR_VALID.
 * Both the ICR busy bit and the remote-read status are polled with a
 * bounded timeout (NOTE(review): the return value on the timeout path
 * is not visible here -- confirm against the full source). */
164 static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
169 lapic_wait_icr_idle();
170 lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
171 lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
173 /* Extra busy check compared to lapic.h */
176 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
177 } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
181 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
182 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
186 if (status == LAPIC_ICR_RR_VALID) {
187 *pvalue = lapic_read(LAPIC_RRR); /* Remote Read Register holds the data */
193 /* Use the LAPIC timer count register to hold each cores init status */
194 #define LAPIC_MSG_REG 0x380
197 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
/* Debug helper: print a caller-supplied tag followed by the core's
 * APIC id, node id and core id. */
200 static inline __attribute__ ((always_inline))
201 void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
205 "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
206 apicid, id.nodeid, id.coreid);
/* Poll the given AP's LAPIC_MSG_REG (via lapic_remote_read) until the
 * low 6 state bits match 'state'.  Part of the BSP/AP startup
 * hand-shake protocol; APs publish their stage in LAPIC_MSG_REG. */
209 static u32 wait_cpu_state(u32 apicid, u32 state)
215 if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
217 if ((readback & 0x3f) == state) {
219 break; //target cpu is in stage started
/* for_each_ap() callback: wait until the AP reports state 0x13
 * ("started") in its LAPIC_MSG_REG, logging success or timeout. */
231 static void wait_ap_started(u32 ap_apicid, void *gp)
234 timeout = wait_cpu_state(ap_apicid, 0x13); // started
235 printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
237 printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
239 printk(BIOS_DEBUG, "started\n");
/* BSP helper: block until every AP core other than the node core0s
 * (core_range = 2) has reported the "started" state. */
243 void wait_all_other_cores_started(u32 bsp_apicid)
245 // all aps other than core0
246 printk(BIOS_DEBUG, "started ap apicid: ");
247 for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
248 printk(BIOS_DEBUG, "\n");
/* Broadcast the "APs may stop" state (0x14) through the BSP's
 * LAPIC_MSG_REG; the BSP apicid is encoded in the top byte. */
251 void allow_all_aps_stop(u32 bsp_apicid)
253 /* Called by the BSP to indicate AP can stop */
255 /* FIXME Do APs use this? */
257 // allow aps to stop use 6 bits for state
258 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
/* Turn on APIC extended id support (extended spurious vector, extended
 * id, extended broadcast) in the node's HT Transaction Control
 * register, F0x68. */
261 static void enable_apic_ext_id(u32 node)
265 val = pci_read_config32(NODE_HT(node), 0x68);
266 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
267 pci_write_config32(NODE_HT(node), 0x68, val);
/* AP-side teardown: undo the CAR-only BU_CFG2 tweak (L2 IC to L3
 * connection), disable Cache-As-RAM, then stop this core.  Only
 * node0/core0 (the BSP) keeps running. */
270 static void STOP_CAR_AND_CPU(void)
274 /* Disable L2 IC to L3 connection (Only for CAR) */
275 msr = rdmsr(BU_CFG2);
276 msr.lo &= ~(1 << ClLinesToNbDis);
279 disable_cache_as_ram(); // inline
280 /* stop all cores except node0/core0 the bsp .... */
/* Early per-core CPU initialization, executed by the BSP and every AP.
 * Enables MMIO PCI config access, performs core0-only NB setup
 * (apicid/cpuid low, CF8 extended config, APIC extended id), optionally
 * lifts the local APIC id by CONFIG_APIC_ID_OFFSET, marks this core
 * "started" (state 0x13) in LAPIC_MSG_REG, and -- on APs only -- loads
 * microcode, runs FID/VID setup (stage2 after a warm reset, stage1
 * otherwise), programs an MTRR covering CONFIG_RAMTOP and halts via
 * STOP_CAR_AND_CPU.  NOTE(review): the return value (presumably
 * bsp_apicid) is set in elided code -- confirm against the full file. */
284 #if RAMINIT_SYSINFO == 1
285 static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
287 static u32 init_cpus(u32 cpu_init_detectedx)
292 struct node_core_id id;
295 * already set early mtrr in cache_as_ram.inc
298 /* enable access pci conf via mmio */
299 set_pci_mmio_conf_reg();
301 /* that is from initial apicid, we need nodeid and coreid
303 id = get_node_core_id_x();
305 /* NB_CFG MSR is shared between cores, so we need make sure
306 core0 is done at first --- use wait_all_core0_started */
307 if (id.coreid == 0) {
308 set_apicid_cpuid_lo(); /* only set it on core0 */
309 set_EnableCf8ExtCfg(); /* only set it on core0 */
310 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
311 enable_apic_ext_id(id.nodeid);
317 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
318 u32 initial_apicid = get_initial_apicid();
320 #if CONFIG_LIFT_BSP_APIC_ID == 0
321 if (initial_apicid != 0) // other than bsp
324 /* use initial apic id to lift it */
325 u32 dword = lapic_read(LAPIC_ID);
326 dword &= ~(0xff << 24);
328 (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
330 lapic_write(LAPIC_ID, dword);
332 #if CONFIG_LIFT_BSP_APIC_ID == 1
333 bsp_apicid += CONFIG_APIC_ID_OFFSET;
338 /* get the apicid, it may be lifted already */
341 // show our apicid, nodeid, and coreid
342 if (id.coreid == 0) {
343 if (id.nodeid != 0) //all core0 except bsp
344 print_apicid_nodeid_coreid(apicid, id, " core0: ");
345 } else { //all other cores
346 print_apicid_nodeid_coreid(apicid, id, " corex: ");
/* A pending INIT means we were reset mid-boot: log it and soft-reset. */
349 if (cpu_init_detectedx) {
350 print_apicid_nodeid_coreid(apicid, id,
351 "\n\n\nINIT detected from ");
352 printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
356 if (id.coreid == 0) {
357 if (!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
358 distinguish_cpu_resets(id.nodeid); // Also indicates we are started
360 // Mark the core as started.
361 lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
/* Everything below this point runs on APs only; the BSP follows its
 * own path through equivalent setup elsewhere. */
363 if (apicid != bsp_apicid) {
364 /* Setup each AP's cores MSRs.
365 * This happens after HTinit.
366 * The BSP runs this code in it's own path.
368 update_microcode(cpuid_eax(1));
372 #if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
373 // Run on all AP for proper FID/VID setup.
374 if (id.coreid == 0) // only need set fid for core0
377 // check warm(bios) reset to call stage2 otherwise do stage1
378 if (warm_reset_detect(id.nodeid)) {
380 "init_fidvid_stage2 apicid: %02x\n",
382 init_fidvid_stage2(apicid, id.nodeid);
385 "init_fidvid_ap(stage1) apicid: %02x\n",
387 init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
393 /* AP is ready, configure MTRRs and go to sleep */
394 set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
/* STOP_CAR_AND_CPU halts the AP; reaching the message below is a bug. */
399 "\nAP %02x should be halted but you are reading this....\n",
/* Check whether the given node's core0 has been through reset handling:
 * reads HT_INIT_CONTROL (F0x6C) and isolates the HTIC_ColdR_Detect bit.
 * NOTE(review): the returned expression is in elided code; presumably
 * nonzero means "started" -- confirm against the full source. */
406 static u32 is_core0_started(u32 nodeid)
410 device = NODE_PCI(nodeid, 0);
411 htic = pci_read_config32(device, HT_INIT_CONTROL);
412 htic &= HTIC_ColdR_Detect;
/* BSP helper: spin until core0 of every AP node reports started.
 * Node 0 (the BSP's own node) is skipped since this code runs there. */
416 void wait_all_core0_started(void)
418 /* When core0 is started, it will distinguish_cpu_resets
419 * So wait for that to finish */
421 u32 nodes = get_nodes();
423 printk(BIOS_DEBUG, "core0 started: ");
424 for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
425 while (!is_core0_started(i)) {
427 printk(BIOS_DEBUG, " %02x", i);
429 printk(BIOS_DEBUG, "\n");
432 #if CONFIG_MAX_PHYSICAL_CPUS > 1
434 * void start_node(u32 node)
436 * start the core0 in node, so it can generate HT packet to feature code.
438 * This function starts the AP nodes core0s. wait_all_core0_started() in
439 * romstage.c waits for all the AP to be finished before continuing
442 static void start_node(u8 node)
446 /* Enable routing table */
447 printk(BIOS_DEBUG, "Start node %02x", node);
449 #if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
450 /* For FAM10 support, we need to set Dram base/limit for the new node */
451 pci_write_config32(NODE_MP(node), 0x44, 0); /* DRAM limit */
452 pci_write_config32(NODE_MP(node), 0x40, 3); /* DRAM base: RE + WE bits */
455 /* Allow APs to make requests (ROM fetch) */
456 val = pci_read_config32(NODE_HT(node), 0x6c);
458 pci_write_config32(NODE_HT(node), 0x6c, val);
460 printk(BIOS_DEBUG, " done.\n");
464 * static void setup_remote_node(u32 node)
466 * Copy the BSP Address Map to each AP.
468 static void setup_remote_node(u8 node)
470 /* These registers can be used with the F1x114_x Address Map at the
471 same time, so they must be set even for 32-node systems */
472 static const u16 pci_reg[] = {
473 /* DRAM Base/Limits Registers */
474 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
475 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
476 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
477 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
478 /* MMIO Base/Limits Registers */
479 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
480 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
481 /* IO Base/Limits Registers */
482 0xc4, 0xcc, 0xd4, 0xdc,
483 0xc0, 0xc8, 0xd0, 0xd8,
484 /* Configuration Map Registers */
485 0xe0, 0xe4, 0xe8, 0xec,
489 printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
491 /* copy the default resource map from node 0 */
492 for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
496 value = pci_read_config32(NODE_MP(0), reg);
497 pci_write_config32(NODE_MP(node), reg, value);
500 printk(BIOS_DEBUG, " done\n");
502 #endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
/* Erratum 281 workaround: adjust XCS (crossbar) token counts and
 * request limits when the system contains (or mixes in) DR-B0/B1
 * silicon on a server platform.  Registers touched: F0x68 (request
 * limit), F3x6C/F3x7C (buffer counts), F3x144 and F3x148..F3x150
 * (link token counts). */
504 static void AMD_Errata281(u8 node, u32 revision, u32 platform)
506 /* Workaround for Transaction Scheduling Conflict in
507 * Northbridge Cross Bar. Implement XCS Token adjustment
508 * for ganged links. Also, perform fix up for the mixed
515 u8 nodes = get_nodes();
517 if (platform & AMD_PTYPE_SVR) {
518 /* For each node we need to check for a "broken" node */
519 if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
520 for (i = 0; i < nodes; i++) {
521 if (mctGetLogicalCPUID(i) &
522 (AMD_DR_B0 | AMD_DR_B1)) {
/* Apply when this node is B0/B1 itself, or a mixed system was found. */
529 if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
531 /* F0X68[22:21] DsNpReqLmt0 = 01b */
532 val = pci_read_config32(NODE_PCI(node, 0), 0x68);
535 pci_write_config32(NODE_PCI(node, 0), 0x68, val);
538 val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
541 pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
544 val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
547 pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
549 /* F3X144[3:0] RspTok = 0001b */
550 val = pci_read_config32(NODE_PCI(node, 3), 0x144);
553 pci_write_config32(NODE_PCI(node, 3), 0x144, val);
555 for (i = 0; i < 3; i++) {
556 reg = 0x148 + (i * 4); /* F3x148, F3x14C, F3x150 */
557 val = pci_read_config32(NODE_PCI(node, 3), reg);
560 pci_write_config32(NODE_PCI(node, 3), reg, val);
/* Erratum 298 workaround ("L2 Eviction May Occur During Processor
 * Operation To Set Accessed or Dirty Bit"): on DR-B0/B1/B2 parts set
 * TlbCacheDis and TlbForceMemTypeUc, then advertise the workaround via
 * the OS Visible Workaround (OSVW) MSRs.  On unaffected B3 parts only
 * the OSVW length is published so the OS knows no workaround applies. */
566 static void AMD_Errata298(void)
568 /* Workaround for L2 Eviction May Occur during operation to
569 * set Accessed or dirty bit.
575 u8 nodes = get_nodes();
577 /* For each core we need to check for a "broken" node */
578 for (i = 0; i < nodes; i++) {
579 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
587 msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
591 msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
594 msr = rdmsr(OSVW_ID_Length);
595 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
596 wrmsr(OSVW_ID_Length, msr);
598 msr = rdmsr(OSVW_Status);
599 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
600 wrmsr(OSVW_Status, msr);
/* B3 and later: report OSVW length without setting the status bit. */
603 if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
604 msr = rdmsr(OSVW_ID_Length);
605 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
606 wrmsr(OSVW_ID_Length, msr);
/* Translate the build-time SYSTEM_TYPE into AMD_PTYPE_* platform flags
 * (desktop, mobile or server) used to select table entries below. */
611 static u32 get_platform_type(void)
615 switch (SYSTEM_TYPE) {
617 ret |= AMD_PTYPE_DSK;
620 ret |= AMD_PTYPE_MOB;
623 ret |= AMD_PTYPE_SVR;
629 /* FIXME: add UMA support. */
631 /* All Fam10 are multi core */
/* Program the PSI_L VID threshold (F3xA0[6:0]) on mobile/desktop
 * platforms: scan P-states P4 down to P0 and use the VID of the lowest
 * enabled P-state. */
637 static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
643 if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
645 /* The following code sets the PSIVID to the lowest support P state
646 * assuming that the VID for the lowest power state is below
647 * the VDD voltage regulator threshold. (This also assumes that there
648 * is a Pstate lower than P0)
651 for (i = 4; i >= 0; i--) {
652 msr = rdmsr(PS_REG_BASE + i);
654 if (msr.hi & PS_EN_MASK) { /* P-state enabled? */
/* NOTE(review): NODE_PCI(i, 3) uses the P-state loop index, not the
 * 'node' parameter -- looks suspicious; confirm against BKDG/upstream. */
655 dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
657 dword |= (msr.lo >> 9) & 0x7F; /* CpuVid field -> PSIVid */
658 pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
666 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
667 * HT Phy operations are not valid on links that aren't present, so this
668 * prevents invalid accesses.
670 * Returns the offset of the link register.
672 static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
677 /* get start of CPU HT Host Capabilities */
678 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
679 val &= 0xFF; //reg offset of first link
683 /* Traverse through the capabilities. */
685 reg = pci_read_config32(NODE_PCI(node, 0), val);
686 /* Is the capability block a HyperTransport capability block? */
687 if ((reg & 0xFF) == 0x08) {
688 /* Is the HT capability block an HT Host Capability? */
689 if ((reg & 0xE0000000) == (1 << 29))
694 val = (reg >> 8) & 0xFF; //update reg offset (next-capability pointer)
695 } while (cap_count && val);
699 /* If requested capability found val != 0 */
707 * AMD_checkLinkType - Compare desired link characteristics using a logical
710 * Returns the link characteristic mask.
712 static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
717 /* Check connect, init and coherency */
718 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
722 linktype |= HTPHY_LINKTYPE_COHERENT;
725 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
/* Link frequency field: > 6 encodes an HT3 frequency, else HT1. */
729 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
731 if (((val >> 8) & 0x0F) > 6)
732 linktype |= HTPHY_LINKTYPE_HT3;
734 linktype |= HTPHY_LINKTYPE_HT1;
/* Ganged/unganged status comes from F0x170 + 4*link. */
737 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
740 linktype |= HTPHY_LINKTYPE_GANGED;
742 linktype |= HTPHY_LINKTYPE_UNGANGED;
748 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
749 * a phy setting for that link.
751 static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
757 /* Determine this link's portal */
761 phyBase = ((u32) link << 3) | 0x180; /* F4x180 + 8*link */
763 /* Get the portal control register's initial value
764 * and update it to access the desired phy register
766 phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
/* Registers above 0x1FF need the direct-map addressing mode. */
768 if (fam10_htphy_default[entry].htreg > 0x1FF) {
769 phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
770 phyReg |= HTPHY_DIRECT_MAP;
772 phyReg &= ~HTPHY_OFFSET_MASK;
775 /* Now get the current phy register data
776 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
778 phyReg |= fam10_htphy_default[entry].htreg;
779 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Spin until the portal signals completion of the read. */
782 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
783 } while (!(val & HTPHY_IS_COMPLETE_MASK));
785 /* Now we have the phy register data, apply the change */
786 val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
787 val &= ~fam10_htphy_default[entry].mask;
788 val |= fam10_htphy_default[entry].data;
789 pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
791 /* write it through the portal to the phy
792 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
794 phyReg |= HTPHY_WRITE_CMD;
795 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Spin until the portal signals completion of the write. */
798 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
799 } while (!(val & HTPHY_IS_COMPLETE_MASK));
/* Apply the fam10_msr_default table to the current core: for every
 * entry matching this CPU's revision and platform type, read-modify-
 * write the MSR using the entry's mask/data pairs. */
802 void cpuSetAMDMSR(void)
804 /* This routine loads the CPU with default settings in fam10_msr_default
805 * table . It must be run after Cache-As-RAM has been enabled, and
806 * Hypertransport initialization has taken place. Also note
807 * that it is run on the current processor only, and only for the current
812 u32 revision, platform;
814 printk(BIOS_DEBUG, "cpuSetAMDMSR ");
816 revision = mctGetLogicalCPUID(0xFF); /* 0xFF = current (local) CPU */
817 platform = get_platform_type();
819 for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
820 if ((fam10_msr_default[i].revision & revision) &&
821 (fam10_msr_default[i].platform & platform)) {
822 msr = rdmsr(fam10_msr_default[i].msr);
823 msr.hi &= ~fam10_msr_default[i].mask_hi;
824 msr.hi |= fam10_msr_default[i].data_hi;
825 msr.lo &= ~fam10_msr_default[i].mask_lo;
826 msr.lo |= fam10_msr_default[i].data_lo;
827 wrmsr(fam10_msr_default[i].msr, msr);
832 printk(BIOS_DEBUG, " done\n");
/* Apply the fam10_pci_default and fam10_htphy_default tables to one
 * node: PCI config registers are read-modify-written per matching
 * entry; HT phy entries are applied to every present HT host link whose
 * link type matches.  Finishes with PSIVID setup and the Erratum 281
 * workaround. */
835 static void cpuSetAMDPCI(u8 node)
837 /* This routine loads the CPU with default settings in fam10_pci_default
838 * table . It must be run after Cache-As-RAM has been enabled, and
839 * Hypertransport initialization has taken place. Also note
840 * that it is run for the first core on each node
843 u32 revision, platform;
847 printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
849 revision = mctGetLogicalCPUID(node);
850 platform = get_platform_type();
852 AMD_SetupPSIVID_d(platform, node); /* Set PSIVID offset which is not table driven */
854 for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
855 if ((fam10_pci_default[i].revision & revision) &&
856 (fam10_pci_default[i].platform & platform)) {
857 val = pci_read_config32(NODE_PCI(node,
858 fam10_pci_default[i].
860 fam10_pci_default[i].offset);
861 val &= ~fam10_pci_default[i].mask;
862 val |= fam10_pci_default[i].data;
863 pci_write_config32(NODE_PCI(node,
864 fam10_pci_default[i].
866 fam10_pci_default[i].offset, val);
870 for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
871 if ((fam10_htphy_default[i].revision & revision) &&
872 (fam10_htphy_default[i].platform & platform)) {
873 /* HT Phy settings either apply to both sublinks or have
874 * separate registers for sublink zero and one, so there
875 * will be two table entries. So, here we only loop
876 * through the sublink zeros in function zero.
878 for (j = 0; j < 4; j++) { /* up to 4 host links */
879 if (AMD_CpuFindCapability(node, j, &offset)) {
880 if (AMD_checkLinkType(node, j, offset)
881 & fam10_htphy_default[i].linktype) {
882 AMD_SetHtPhyRegister(node, j,
886 /* No more capabilities,
895 /* FIXME: add UMA support and programXbarToSriReg(); */
897 AMD_Errata281(node, revision, platform);
899 /* FIXME: if the dct phy doesn't init correct it needs to reset.
900 if (revision & (AMD_DR_B2 | AMD_DR_B3))
903 printk(BIOS_DEBUG, " done\n");
/* Clear this core's Machine Check status registers, which power on with
 * undefined contents.  Guarded by CPUID feature bits; only runs when
 * both MCE and MCA are supported and MCG_CTL_P reports bank controls. */
907 static void cpuInitializeMCA(void)
909 /* Clears Machine Check Architecture (MCA) registers, which power on
910 * containing unknown data, on currently running processor.
911 * This routine should only be executed on initial power on (cold boot),
912 * not across a warm reset because valid data is present at that time.
919 if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
920 msr = rdmsr(MCG_CAP);
921 if (msr.lo & MCG_CTL_P) { /* MCG_CTL_P bit is set? */
/* msr.lo low byte holds the bank count; each bank's registers are
 * spaced 4 MSRs apart, so scale the count accordingly. */
924 msr.lo <<= 2; /* multiply the count by 4 */
925 reg = MC0_STA + msr.lo;
927 for (i = 0; i < 4; i++) {
929 reg -= 4; /* Touch status regs for each bank */
937 * finalize_node_setup()
939 * Do any additional post HT init
942 static void finalize_node_setup(struct sys_info *sysinfo)
945 u8 nodes = get_nodes();
948 #if RAMINIT_SYSINFO == 1
949 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
950 reg = pci_read_config32(NODE_HT(0), 0x64);
951 sysinfo->sblk = (reg >> 8) & 7;
953 sysinfo->nodes = nodes;
954 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
957 for (i = 0; i < nodes; i++) {
962 // Prep each node for FID/VID setup.
966 #if CONFIG_MAX_PHYSICAL_CPUS > 1
967 /* Skip the BSP, start at node 1 */
968 for (i = 1; i < nodes; i++) {
969 setup_remote_node(i);