2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 // Honors CONFIG_ENABLE_APIC_EXT_ID, CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID
27 #ifndef SET_FIDVID_CORE0_ONLY
28 /* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
29 Need to do every AP to set common FID/VID */
30 #define SET_FIDVID_CORE0_ONLY 0
33 void update_microcode(u32 cpu_deviceid);
34 static void prep_fid_change(void);
35 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
36 void cpuSetAMDMSR(void);
38 #if CONFIG_PCI_IO_CFG_EXT == 1
39 static void set_EnableCf8ExtCfg(void)
41 // set the NB_CFG[46]=1;
43 msr = rdmsr(NB_CFG_MSR);
44 // EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
45 msr.hi |= (1 << (46 - 32));
46 wrmsr(NB_CFG_MSR, msr);
49 static void set_EnableCf8ExtCfg(void) { }
53 #define PCI_MMIO_BASE 0xfe000000
54 /* Because GS is used to store the high part of the base, the low part
55 must start at 0, i.e. PCI_MMIO_BASE & 0x00ffffff must be 0. */
57 static void set_pci_mmio_conf_reg(void)
59 #if CONFIG_MMCONF_SUPPORT
61 msr = rdmsr(0xc0010058);
62 msr.lo &= ~(0xfff00000 | (0xf << 2));
63 // 256 bus per segment, MMIO reg will be 4G , enable MMIO Config space
64 msr.lo |= ((8 + CONFIG_PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
65 msr.hi &= ~(0x0000ffff);
66 msr.hi |= (PCI_MMIO_BASE >> (32 - 8));
67 wrmsr(0xc0010058, msr); // MMIO Config Base Address Reg
69 //mtrr for that range?
70 // set_var_mtrr_x(7, PCI_MMIO_BASE<<8, PCI_MMIO_BASE>>(32-8), 0x00000000, 0x01, MTRR_TYPE_UNCACHEABLE);
74 msr.hi = (PCI_MMIO_BASE >> (32 - 8));
76 wrmsr(0xc0000101, msr); //GS_Base Reg
81 typedef void (*process_ap_t) (u32 apicid, void *gp);
83 //core_range = 0 : all cores
84 //core range = 1 : core 0 only
85 //core range = 2 : cores other than core0
87 static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
90 // here assume the OS don't change our apicid
101 /* get_nodes define in ht_wrapper.c */
104 disable_siblings = !CONFIG_LOGICAL_CPUS;
106 #if CONFIG_LOGICAL_CPUS == 1 && CONFIG_HAVE_OPTION_TABLE == 1
107 if (read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 mean multi core
108 disable_siblings = 1;
112 /* Assume that all node are same stepping, otherwise we can use use
113 nb_cfg_54 from bsp for all nodes */
114 nb_cfg_54 = read_nb_cfg_54();
116 ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
117 if (ApicIdCoreIdSize) {
118 siblings = ((1 << ApicIdCoreIdSize) - 1);
120 siblings = 3; //quad core
123 for (i = 0; i < nodes; i++) {
124 cores_found = get_core_num_in_bsp(i);
128 if (core_range == 2) {
134 if (disable_siblings || (core_range == 1)) {
140 for (j = jstart; j <= jend; j++) {
142 i * (nb_cfg_54 ? (siblings + 1) : 1) +
143 j * (nb_cfg_54 ? 1 : 64);
145 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
146 #if CONFIG_LIFT_BSP_APIC_ID == 0
147 if ((i != 0) || (j != 0)) /* except bsp */
149 ap_apicid += CONFIG_APIC_ID_OFFSET;
152 if (ap_apicid == bsp_apicid)
155 process_ap(ap_apicid, gp);
161 static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
166 lapic_wait_icr_idle();
167 lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
168 lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
170 /* Extra busy check compared to lapic.h */
173 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
174 } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
178 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
179 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
183 if (status == LAPIC_ICR_RR_VALID) {
184 *pvalue = lapic_read(LAPIC_RRR);
190 /* Use the LAPIC timer count register to hold each cores init status */
191 #define LAPIC_MSG_REG 0x380
194 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
197 static inline __attribute__ ((always_inline))
198 void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
202 "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
203 apicid, id.nodeid, id.coreid);
206 static u32 wait_cpu_state(u32 apicid, u32 state)
212 if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
214 if ((readback & 0x3f) == state) {
216 break; //target cpu is in stage started
228 static void wait_ap_started(u32 ap_apicid, void *gp)
231 timeout = wait_cpu_state(ap_apicid, 0x13); // started
232 printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
234 printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
236 printk(BIOS_DEBUG, "started\n");
240 static void wait_all_other_cores_started(u32 bsp_apicid)
242 // all aps other than core0
243 printk(BIOS_DEBUG, "started ap apicid: ");
244 for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
245 printk(BIOS_DEBUG, "\n");
248 static void allow_all_aps_stop(u32 bsp_apicid)
250 /* Called by the BSP to indicate AP can stop */
252 /* FIXME Do APs use this?
253 Looks like wait_till_sysinfo_in_ram is used instead. */
255 // allow aps to stop use 6 bits for state
256 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
259 static void enable_apic_ext_id(u32 node)
263 val = pci_read_config32(NODE_HT(node), 0x68);
264 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
265 pci_write_config32(NODE_HT(node), 0x68, val);
268 static void STOP_CAR_AND_CPU(void)
272 /* Disable L2 IC to L3 connection (Only for CAR) */
273 msr = rdmsr(BU_CFG2);
274 msr.lo &= ~(1 << ClLinesToNbDis);
277 disable_cache_as_ram(); // inline
278 /* stop all cores except node0/core0 the bsp .... */
282 #if RAMINIT_SYSINFO == 1
283 static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
285 static u32 init_cpus(u32 cpu_init_detectedx)
290 struct node_core_id id;
293 * already set early mtrr in cache_as_ram.inc
296 /* enable access pci conf via mmio */
297 set_pci_mmio_conf_reg();
299 /* that is from initial apicid, we need nodeid and coreid
301 id = get_node_core_id_x();
303 /* NB_CFG MSR is shared between cores, so we need make sure
304 core0 is done at first --- use wait_all_core0_started */
305 if (id.coreid == 0) {
306 set_apicid_cpuid_lo(); /* only set it on core0 */
307 set_EnableCf8ExtCfg(); /* only set it on core0 */
308 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
309 enable_apic_ext_id(id.nodeid);
315 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
316 u32 initial_apicid = get_initial_apicid();
318 #if CONFIG_LIFT_BSP_APIC_ID == 0
319 if (initial_apicid != 0) // other than bsp
322 /* use initial apic id to lift it */
323 u32 dword = lapic_read(LAPIC_ID);
324 dword &= ~(0xff << 24);
326 (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
328 lapic_write(LAPIC_ID, dword);
330 #if CONFIG_LIFT_BSP_APIC_ID == 1
331 bsp_apicid += CONFIG_APIC_ID_OFFSET;
336 /* get the apicid, it may be lifted already */
339 // show our apicid, nodeid, and coreid
340 if (id.coreid == 0) {
341 if (id.nodeid != 0) //all core0 except bsp
342 print_apicid_nodeid_coreid(apicid, id, " core0: ");
343 } else { //all other cores
344 print_apicid_nodeid_coreid(apicid, id, " corex: ");
347 if (cpu_init_detectedx) {
348 print_apicid_nodeid_coreid(apicid, id,
349 "\n\n\nINIT detected from ");
350 printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
354 if (id.coreid == 0) {
355 if (!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
356 distinguish_cpu_resets(id.nodeid); // Also indicates we are started
358 // Mark the core as started.
359 lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
361 if (apicid != bsp_apicid) {
362 /* Setup each AP's cores MSRs.
363 * This happens after HTinit.
364 * The BSP runs this code in it's own path.
366 update_microcode(cpuid_eax(1));
370 #if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
371 // Run on all AP for proper FID/VID setup.
372 if (id.coreid == 0) // only need set fid for core0
375 // check warm(bios) reset to call stage2 otherwise do stage1
376 if (warm_reset_detect(id.nodeid)) {
378 "init_fidvid_stage2 apicid: %02x\n",
380 init_fidvid_stage2(apicid, id.nodeid);
383 "init_fidvid_ap(stage1) apicid: %02x\n",
385 init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
391 /* AP is ready, Wait for the BSP to get memory configured */
392 /* FIXME: many cores spinning on node0 pci register seems to be bad.
393 * Why do we need to wait? These APs are just going to go sit in a hlt.
395 //wait_till_sysinfo_in_ram();
397 set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
401 "\nAP %02x should be halted but you are reading this....\n",
408 static u32 is_core0_started(u32 nodeid)
412 device = NODE_PCI(nodeid, 0);
413 htic = pci_read_config32(device, HT_INIT_CONTROL);
414 htic &= HTIC_ColdR_Detect;
418 static void wait_all_core0_started(void)
420 /* When core0 is started, it will distingush_cpu_resets
421 * So wait for that to finish */
423 u32 nodes = get_nodes();
425 printk(BIOS_DEBUG, "core0 started: ");
426 for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
427 while (!is_core0_started(i)) {
429 printk(BIOS_DEBUG, " %02x", i);
431 printk(BIOS_DEBUG, "\n");
434 #if CONFIG_MAX_PHYSICAL_CPUS > 1
436 * void start_node(u32 node)
438 * start the core0 in node, so it can generate HT packet to feature code.
440 * This function starts the AP nodes core0s. wait_all_core0_started() in
441 * romstage.c waits for all the AP to be finished before continuing
444 static void start_node(u8 node)
448 /* Enable routing table */
449 printk(BIOS_DEBUG, "Start node %02x", node);
451 #if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
452 /* For FAM10 support, we need to set Dram base/limit for the new node */
453 pci_write_config32(NODE_MP(node), 0x44, 0);
454 pci_write_config32(NODE_MP(node), 0x40, 3);
457 /* Allow APs to make requests (ROM fetch) */
458 val = pci_read_config32(NODE_HT(node), 0x6c);
460 pci_write_config32(NODE_HT(node), 0x6c, val);
462 printk(BIOS_DEBUG, " done.\n");
466 * static void setup_remote_node(u32 node)
468 * Copy the BSP Address Map to each AP.
470 static void setup_remote_node(u8 node)
472 /* There registers can be used with F1x114_x Address Map at the
473 same time, So must set them even 32 node */
474 static const u16 pci_reg[] = {
475 /* DRAM Base/Limits Registers */
476 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
477 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
478 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
479 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
480 /* MMIO Base/Limits Registers */
481 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
482 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
483 /* IO Base/Limits Registers */
484 0xc4, 0xcc, 0xd4, 0xdc,
485 0xc0, 0xc8, 0xd0, 0xd8,
486 /* Configuration Map Registers */
487 0xe0, 0xe4, 0xe8, 0xec,
491 printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
493 /* copy the default resource map from node 0 */
494 for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
498 value = pci_read_config32(NODE_MP(0), reg);
499 pci_write_config32(NODE_MP(node), reg, value);
502 printk(BIOS_DEBUG, " done\n");
504 #endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
506 static void AMD_Errata281(u8 node, u32 revision, u32 platform)
508 /* Workaround for Transaction Scheduling Conflict in
509 * Northbridge Cross Bar. Implement XCS Token adjustment
510 * for ganged links. Also, perform fix up for the mixed
517 u8 nodes = get_nodes();
519 if (platform & AMD_PTYPE_SVR) {
520 /* For each node we need to check for a "broken" node */
521 if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
522 for (i = 0; i < nodes; i++) {
523 if (mctGetLogicalCPUID(i) &
524 (AMD_DR_B0 | AMD_DR_B1)) {
531 if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
533 /* F0X68[22:21] DsNpReqLmt0 = 01b */
534 val = pci_read_config32(NODE_PCI(node, 0), 0x68);
537 pci_write_config32(NODE_PCI(node, 0), 0x68, val);
540 val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
543 pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
546 val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
549 pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
551 /* F3X144[3:0] RspTok = 0001b */
552 val = pci_read_config32(NODE_PCI(node, 3), 0x144);
555 pci_write_config32(NODE_PCI(node, 3), 0x144, val);
557 for (i = 0; i < 3; i++) {
558 reg = 0x148 + (i * 4);
559 val = pci_read_config32(NODE_PCI(node, 3), reg);
562 pci_write_config32(NODE_PCI(node, 3), reg, val);
568 static void AMD_Errata298(void)
570 /* Workaround for L2 Eviction May Occur during operation to
571 * set Accessed or dirty bit.
577 u8 nodes = get_nodes();
579 /* For each core we need to check for a "broken" node */
580 for (i = 0; i < nodes; i++) {
581 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
589 msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
593 msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
596 msr = rdmsr(OSVW_ID_Length);
597 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
598 wrmsr(OSVW_ID_Length, msr);
600 msr = rdmsr(OSVW_Status);
601 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
602 wrmsr(OSVW_Status, msr);
605 if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
606 msr = rdmsr(OSVW_ID_Length);
607 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
608 wrmsr(OSVW_ID_Length, msr);
613 static u32 get_platform_type(void)
617 switch (SYSTEM_TYPE) {
619 ret |= AMD_PTYPE_DSK;
622 ret |= AMD_PTYPE_MOB;
625 ret |= AMD_PTYPE_SVR;
631 /* FIXME: add UMA support. */
633 /* All Fam10 are multi core */
639 static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
645 if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
647 /* The following code sets the PSIVID to the lowest support P state
648 * assuming that the VID for the lowest power state is below
649 * the VDD voltage regulator threshold. (This also assumes that there
650 * is a Pstate lower than P0)
653 for (i = 4; i >= 0; i--) {
654 msr = rdmsr(PS_REG_BASE + i);
656 if (msr.hi & PS_EN_MASK) {
657 dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
659 dword |= (msr.lo >> 9) & 0x7F;
660 pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
668 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
669 * HT Phy operations are not valid on links that aren't present, so this
670 * prevents invalid accesses.
672 * Returns the offset of the link register.
674 static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
679 /* get start of CPU HT Host Capabilities */
680 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
681 val &= 0xFF; //reg offset of first link
685 /* Traverse through the capabilities. */
687 reg = pci_read_config32(NODE_PCI(node, 0), val);
688 /* Is the capability block a HyperTransport capability block? */
689 if ((reg & 0xFF) == 0x08) {
690 /* Is the HT capability block an HT Host Capability? */
691 if ((reg & 0xE0000000) == (1 << 29))
696 val = (reg >> 8) & 0xFF; //update reg offset
697 } while (cap_count && val);
701 /* If requested capability found val != 0 */
709 * AMD_checkLinkType - Compare desired link characteristics using a logical
712 * Returns the link characteristic mask.
714 static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
719 /* Check connect, init and coherency */
720 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
724 linktype |= HTPHY_LINKTYPE_COHERENT;
727 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
731 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
733 if (((val >> 8) & 0x0F) > 6)
734 linktype |= HTPHY_LINKTYPE_HT3;
736 linktype |= HTPHY_LINKTYPE_HT1;
739 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
742 linktype |= HTPHY_LINKTYPE_GANGED;
744 linktype |= HTPHY_LINKTYPE_UNGANGED;
750 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
751 * a phy setting for that link.
753 static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
759 /* Determine this link's portal */
763 phyBase = ((u32) link << 3) | 0x180;
765 /* Get the portal control register's initial value
766 * and update it to access the desired phy register
768 phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
770 if (fam10_htphy_default[entry].htreg > 0x1FF) {
771 phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
772 phyReg |= HTPHY_DIRECT_MAP;
774 phyReg &= ~HTPHY_OFFSET_MASK;
777 /* Now get the current phy register data
778 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
780 phyReg |= fam10_htphy_default[entry].htreg;
781 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
784 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
785 } while (!(val & HTPHY_IS_COMPLETE_MASK));
787 /* Now we have the phy register data, apply the change */
788 val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
789 val &= ~fam10_htphy_default[entry].mask;
790 val |= fam10_htphy_default[entry].data;
791 pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
793 /* write it through the portal to the phy
794 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
796 phyReg |= HTPHY_WRITE_CMD;
797 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
800 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
801 } while (!(val & HTPHY_IS_COMPLETE_MASK));
804 void cpuSetAMDMSR(void)
806 /* This routine loads the CPU with default settings in fam10_msr_default
807 * table . It must be run after Cache-As-RAM has been enabled, and
808 * Hypertransport initialization has taken place. Also note
809 * that it is run on the current processor only, and only for the current
814 u32 revision, platform;
816 printk(BIOS_DEBUG, "cpuSetAMDMSR ");
818 revision = mctGetLogicalCPUID(0xFF);
819 platform = get_platform_type();
821 for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
822 if ((fam10_msr_default[i].revision & revision) &&
823 (fam10_msr_default[i].platform & platform)) {
824 msr = rdmsr(fam10_msr_default[i].msr);
825 msr.hi &= ~fam10_msr_default[i].mask_hi;
826 msr.hi |= fam10_msr_default[i].data_hi;
827 msr.lo &= ~fam10_msr_default[i].mask_lo;
828 msr.lo |= fam10_msr_default[i].data_lo;
829 wrmsr(fam10_msr_default[i].msr, msr);
834 printk(BIOS_DEBUG, " done\n");
837 static void cpuSetAMDPCI(u8 node)
839 /* This routine loads the CPU with default settings in fam10_pci_default
840 * table . It must be run after Cache-As-RAM has been enabled, and
841 * Hypertransport initialization has taken place. Also note
842 * that it is run for the first core on each node
845 u32 revision, platform;
849 printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
851 revision = mctGetLogicalCPUID(node);
852 platform = get_platform_type();
854 AMD_SetupPSIVID_d(platform, node); /* Set PSIVID offset which is not table driven */
856 for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
857 if ((fam10_pci_default[i].revision & revision) &&
858 (fam10_pci_default[i].platform & platform)) {
859 val = pci_read_config32(NODE_PCI(node,
860 fam10_pci_default[i].
862 fam10_pci_default[i].offset);
863 val &= ~fam10_pci_default[i].mask;
864 val |= fam10_pci_default[i].data;
865 pci_write_config32(NODE_PCI(node,
866 fam10_pci_default[i].
868 fam10_pci_default[i].offset, val);
872 for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
873 if ((fam10_htphy_default[i].revision & revision) &&
874 (fam10_htphy_default[i].platform & platform)) {
875 /* HT Phy settings either apply to both sublinks or have
876 * separate registers for sublink zero and one, so there
877 * will be two table entries. So, here we only loop
878 cd t * through the sublink zeros in function zero.
880 for (j = 0; j < 4; j++) {
881 if (AMD_CpuFindCapability(node, j, &offset)) {
882 if (AMD_checkLinkType(node, j, offset)
883 & fam10_htphy_default[i].linktype) {
884 AMD_SetHtPhyRegister(node, j,
888 /* No more capabilities,
897 /* FIXME: add UMA support and programXbarToSriReg(); */
899 AMD_Errata281(node, revision, platform);
901 /* FIXME: if the dct phy doesn't init correct it needs to reset.
902 if (revision & (AMD_DR_B2 | AMD_DR_B3))
905 printk(BIOS_DEBUG, " done\n");
908 static void cpuInitializeMCA(void)
910 /* Clears Machine Check Architecture (MCA) registers, which power on
911 * containing unknown data, on currently running processor.
912 * This routine should only be executed on initial power on (cold boot),
913 * not across a warm reset because valid data is present at that time.
920 if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
921 msr = rdmsr(MCG_CAP);
922 if (msr.lo & MCG_CTL_P) { /* MCG_CTL_P bit is set? */
925 msr.lo <<= 2; /* multiply the count by 4 */
926 reg = MC0_STA + msr.lo;
928 for (i = 0; i < 4; i++) {
930 reg -= 4; /* Touch status regs for each bank */
937 * finalize_node_setup()
939 * Do any additional post HT init
942 static void finalize_node_setup(struct sys_info *sysinfo)
945 u8 nodes = get_nodes();
948 #if RAMINIT_SYSINFO == 1
949 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
950 reg = pci_read_config32(NODE_HT(0), 0x64);
951 sysinfo->sblk = (reg >> 8) & 7;
953 sysinfo->nodes = nodes;
954 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
957 for (i = 0; i < nodes; i++) {
962 // Prep each node for FID/VID setup.
966 #if CONFIG_MAX_PHYSICAL_CPUS > 1
967 /* Skip the BSP, start at node 1 */
968 for (i = 1; i < nodes; i++) {
969 setup_remote_node(i);