2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <cpu/x86/lapic.h>
23 #include <cpu/x86/mtrr.h>
24 #include <northbridge/amd/amdfam10/amdfam10.h>
25 #include <northbridge/amd/amdht/AsPsDefs.h>
26 #include <northbridge/amd/amdht/porting.h>
28 #include <cpu/x86/mtrr/earlymtrr.c>
29 #include <northbridge/amd/amdfam10/raminit_amdmct.c>
// it takes the CONFIG_ENABLE_APIC_EXT_ID and CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID
#ifndef SET_FIDVID_CORE0_ONLY
/* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
   Need to do every AP to set common FID/VID */
#define SET_FIDVID_CORE0_ONLY 0
/* Forward declarations for routines defined later in this file
 * (or in the raminit code included above). */
static void prep_fid_change(void);
static void init_fidvid_stage2(u32 apicid, u32 nodeid);
void cpuSetAMDMSR(void);
#if CONFIG_PCI_IO_CFG_EXT == 1
/* Enable extended CF8/CFC PCI config access by setting NB_CFG MSR
 * bit 46 (EnableCf8ExtCfg).  NB_CFG is shared between cores, so the
 * callers run this on core0 only (see init_cpus()). */
static void set_EnableCf8ExtCfg(void)
	// set the NB_CFG[46]=1;
	msr = rdmsr(NB_CFG_MSR);
	// EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
	msr.hi |= (1 << (46 - 32));	/* bit 46 lives in the high dword */
	wrmsr(NB_CFG_MSR, msr);
/* No-op stub when extended config access is not configured. */
static void set_EnableCf8ExtCfg(void) { }
#define PCI_MMIO_BASE 0xfe000000
/* because we will use gs to store hi, so need to make sure lo can start
   from 0, So PCI_MMIO_BASE & 0x00ffffff should be equal to 0 */

/* Program MSR 0xC0010058 (MMIO Config Base Address) so PCI config space
 * is reachable through MMIO, then park the window's high byte in the
 * GS_Base MSR so later code can recover it cheaply. */
static void set_pci_mmio_conf_reg(void)
#if CONFIG_MMCONF_SUPPORT
	msr = rdmsr(0xc0010058);
	/* clear base-address bits [31:20] and the bus-range field [5:2] */
	msr.lo &= ~(0xfff00000 | (0xf << 2));
	// 256 bus per segment, MMIO reg will be 4G , enable MMIO Config space
	msr.lo |= ((8 + CONFIG_PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
	msr.hi &= ~(0x0000ffff);
	msr.hi |= (PCI_MMIO_BASE >> (32 - 8));
	wrmsr(0xc0010058, msr);	// MMIO Config Base Address Reg
	//mtrr for that range?
	// set_var_mtrr_x(7, PCI_MMIO_BASE<<8, PCI_MMIO_BASE>>(32-8), 0x00000000, 0x01, MTRR_TYPE_UNCACHEABLE);
	/* stash the window's high byte for later retrieval */
	msr.hi = (PCI_MMIO_BASE >> (32 - 8));
	wrmsr(0xc0000101, msr);	//GS_Base Reg
/* Callback type invoked once per selected AP. */
typedef void (*process_ap_t) (u32 apicid, void *gp);

/* Invoke process_ap(apicid, gp) for every AP selected by core_range;
 * the BSP itself is always skipped.
 *  core_range = 0 : all cores
 *  core range = 1 : core 0 only
 *  core range = 2 : cores other than core0
 */
static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
	// here assume the OS don't change our apicid
	u32 disable_siblings;
	u32 ApicIdCoreIdSize;
	/* get_nodes define in ht_wrapper.c */
	/* Siblings are skipped when logical CPUs are disabled at build time
	 * or via the CMOS multi_core option. */
	if (!CONFIG_LOGICAL_CPUS ||
	    read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) {	// 0 means multi core
		disable_siblings = 1;
		disable_siblings = 0;
	/* Assume that all node are same stepping, otherwise we can use use
	   nb_cfg_54 from bsp for all nodes */
	nb_cfg_54 = read_nb_cfg_54();
	/* CPUID 8000_0008:ECX[15:12] = ApicIdCoreIdSize, the number of
	 * APIC-id bits reserved for cores within a node. */
	ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
	if (ApicIdCoreIdSize) {
		siblings = ((1 << ApicIdCoreIdSize) - 1);
		siblings = 3;	//quad core
	for (i = 0; i < nodes; i++) {
		cores_found = get_core_num_in_bsp(i);
		if (core_range == 2) {
		if (disable_siblings || (core_range == 1)) {
		for (j = jstart; j <= jend; j++) {
			/* APIC id layout depends on NB_CFG[54]:
			 * set:   node * (siblings + 1) + core
			 * clear: node + core * 64 */
			i * (nb_cfg_54 ? (siblings + 1) : 1) +
			    j * (nb_cfg_54 ? 1 : 64);
#if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
#if CONFIG_LIFT_BSP_APIC_ID == 0
			if ((i != 0) || (j != 0))	/* except bsp */
				ap_apicid += CONFIG_APIC_ID_OFFSET;
			if (ap_apicid == bsp_apicid)	/* never process the BSP */
			process_ap(ap_apicid, gp);
/* Read a LAPIC register of a remote CPU through the ICR remote-read
 * command.  On success the value lands in *pvalue and 0 is returned
 * (callers test for != 0 on failure/timeout — see wait_cpu_state()). */
static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
	lapic_wait_icr_idle();
	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
	lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
	/* Extra busy check compared to lapic.h */
	status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
	/* Wait for the remote read to complete or time out. */
	status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
	/* Only a VALID status yields usable data in the remote-read reg. */
	if (status == LAPIC_ICR_RR_VALID) {
		*pvalue = lapic_read(LAPIC_RRR);
/* Use the LAPIC timer count register to hold each cores init status */
#define LAPIC_MSG_REG 0x380

static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);

/* Debug helper: print one line identifying a CPU by APIC id, node and core,
 * prefixed with the caller-supplied string. */
static inline __attribute__ ((always_inline))
void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
	"%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
	       apicid, id.nodeid, id.coreid);
/* Poll the target CPU's LAPIC_MSG_REG via remote read until the low six
 * status bits equal 'state' (or the loop gives up). */
static u32 wait_cpu_state(u32 apicid, u32 state)
	if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
	if ((readback & 0x3f) == state) {	/* only 6 bits carry the state */
		break;	//target cpu is in stage started
/* for_each_ap() callback: wait until the given AP reports state 0x13
 * ("started") in its LAPIC_MSG_REG, and log success or timeout. */
static void wait_ap_started(u32 ap_apicid, void *gp)
	timeout = wait_cpu_state(ap_apicid, 0x13);	// started
	printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
	printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
	printk(BIOS_DEBUG, "started\n");
/* BSP: block until every AP that is not a core0 has reported "started". */
void wait_all_other_cores_started(u32 bsp_apicid)
	// all aps other than core0
	printk(BIOS_DEBUG, "started ap apicid: ");
	for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
	printk(BIOS_DEBUG, "\n");
/* Broadcast state 0x14 in the BSP's LAPIC_MSG_REG, signalling the APs
 * that they may stop (the APs poll this register). */
void allow_all_aps_stop(u32 bsp_apicid)
	/* Called by the BSP to indicate AP can stop */
	/* FIXME Do APs use this? */
	// allow aps to stop use 6 bits for state
	lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
/* Enable extended APIC id handling in the node's HT Transaction Control
 * register (F0x68): extended spurious vector, extended id, extended
 * broadcast. */
static void enable_apic_ext_id(u32 node)
	val = pci_read_config32(NODE_HT(node), 0x68);
	val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
	pci_write_config32(NODE_HT(node), 0x68, val);
/* Tear down Cache-As-RAM on an AP and park the core.  Callers treat any
 * return as an error (see the "should be halted" message in init_cpus()). */
static void STOP_CAR_AND_CPU(void)
	/* Disable L2 IC to L3 connection (Only for CAR) */
	msr = rdmsr(BU_CFG2);
	msr.lo &= ~(1 << ClLinesToNbDis);
	disable_cache_as_ram();	// inline
	/* stop all cores except node0/core0 the bsp .... */
#if RAMINIT_SYSINFO == 1
/* Early per-core init, executed by every core while still in CAR.
 * Enables MMIO config access, optionally lifts APIC ids, marks the core
 * "started" (state 0x13), then for APs: programs MSRs/microcode and
 * FID/VID, sets an MTRR and parks the AP.  Only the BSP returns. */
static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
static u32 init_cpus(u32 cpu_init_detectedx)
	struct node_core_id id;
	/*
	 * already set early mtrr in cache_as_ram.inc
	 */
	/* enable access pci conf via mmio */
	set_pci_mmio_conf_reg();
	/* that is from initial apicid, we need nodeid and coreid
	 */
	id = get_node_core_id_x();
	/* NB_CFG MSR is shared between cores, so we need make sure
	   core0 is done at first --- use wait_all_core0_started */
	if (id.coreid == 0) {
		set_apicid_cpuid_lo();	/* only set it on core0 */
		set_EnableCf8ExtCfg();	/* only set it on core0 */
#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
		enable_apic_ext_id(id.nodeid);
#if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
	u32 initial_apicid = get_initial_apicid();
#if CONFIG_LIFT_BSP_APIC_ID == 0
	if (initial_apicid != 0)	// other than bsp
		/* use initial apic id to lift it */
		u32 dword = lapic_read(LAPIC_ID);
		dword &= ~(0xff << 24);	/* APIC id lives in LAPIC_ID[31:24] */
		(((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
		lapic_write(LAPIC_ID, dword);
#if CONFIG_LIFT_BSP_APIC_ID == 1
	bsp_apicid += CONFIG_APIC_ID_OFFSET;
	/* get the apicid, it may be lifted already */
	// show our apicid, nodeid, and coreid
	if (id.coreid == 0) {
		if (id.nodeid != 0)	//all core0 except bsp
			print_apicid_nodeid_coreid(apicid, id, " core0: ");
	} else {		//all other cores
		print_apicid_nodeid_coreid(apicid, id, " corex: ");
	/* An INIT while we run means a reset is pending: soft-reset. */
	if (cpu_init_detectedx) {
		print_apicid_nodeid_coreid(apicid, id,
					   "\n\n\nINIT detected from ");
		printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
	if (id.coreid == 0) {
		if (!(warm_reset_detect(id.nodeid)))	//FIXME: INIT is checked above but check for more resets?
			distinguish_cpu_resets(id.nodeid);	// Also indicates we are started
	// Mark the core as started.
	lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
	if (apicid != bsp_apicid) {
		/* Setup each AP's cores MSRs.
		 * This happens after HTinit.
		 * The BSP runs this code in it's own path.
		 */
		update_microcode(cpuid_eax(1));
#if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
		// Run on all AP for proper FID/VID setup.
		if (id.coreid == 0)	// only need set fid for core0
			// check warm(bios) reset to call stage2 otherwise do stage1
			if (warm_reset_detect(id.nodeid)) {
				"init_fidvid_stage2 apicid: %02x\n",
				init_fidvid_stage2(apicid, id.nodeid);
				"init_fidvid_ap(stage1) apicid: %02x\n",
				init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
		/* AP is ready, configure MTRRs and go to sleep */
		set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
		/* Reached only if STOP_CAR_AND_CPU failed to halt the AP. */
		"\nAP %02x should be halted but you are reading this....\n",
/* Return nonzero once node 'nodeid' has its core0 running.
 * NOTE(review): tests the HTIC_ColdR_Detect bit of F0x6C, which
 * distinguish_cpu_resets() manipulates when a core0 starts — confirm
 * the exact handshake against that function. */
static u32 is_core0_started(u32 nodeid)
	device = NODE_PCI(nodeid, 0);
	htic = pci_read_config32(device, HT_INIT_CONTROL);
	htic &= HTIC_ColdR_Detect;
/* BSP: spin until core0 of every AP node reports started (node 0 is the
 * BSP itself and is skipped). */
void wait_all_core0_started(void)
	/* When core0 is started, it will distingush_cpu_resets
	 * So wait for that to finish */
	u32 nodes = get_nodes();
	printk(BIOS_DEBUG, "core0 started: ");
	for (i = 1; i < nodes; i++) {	// skip bsp, because it is running on bsp
		while (!is_core0_started(i)) {
		printk(BIOS_DEBUG, " %02x", i);
	printk(BIOS_DEBUG, "\n");
#if CONFIG_MAX_PHYSICAL_CPUS > 1
/**
 * void start_node(u32 node)
 *
 * start the core0 in node, so it can generate HT packet to feature code.
 *
 * This function starts the AP nodes core0s. wait_all_core0_started() in
 * romstage.c waits for all the AP to be finished before continuing
 */
static void start_node(u8 node)
	/* Enable routing table */
	printk(BIOS_DEBUG, "Start node %02x", node);
#if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
	/* For FAM10 support, we need to set Dram base/limit for the new node */
	pci_write_config32(NODE_MP(node), 0x44, 0);
	pci_write_config32(NODE_MP(node), 0x40, 3);
	/* Allow APs to make requests (ROM fetch) */
	val = pci_read_config32(NODE_HT(node), 0x6c);
	pci_write_config32(NODE_HT(node), 0x6c, val);
	printk(BIOS_DEBUG, " done.\n");
/**
 * static void setup_remote_node(u32 node)
 *
 * Copy the BSP Address Map to each AP.
 */
static void setup_remote_node(u8 node)
	/* There registers can be used with F1x114_x Address Map at the
	   same time, So must set them even 32 node */
	static const u16 pci_reg[] = {
		/* DRAM Base/Limits Registers */
		0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
		0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
		0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
		0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
		/* MMIO Base/Limits Registers */
		0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
		0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
		/* IO Base/Limits Registers */
		0xc4, 0xcc, 0xd4, 0xdc,
		0xc0, 0xc8, 0xd0, 0xd8,
		/* Configuration Map Registers */
		0xe0, 0xe4, 0xe8, 0xec,
	printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
	/* copy the default resource map from node 0 */
	for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
		value = pci_read_config32(NODE_MP(0), reg);
		pci_write_config32(NODE_MP(node), reg, value);
	printk(BIOS_DEBUG, " done\n");
#endif				/* CONFIG_MAX_PHYSICAL_CPUS > 1 */
/* Erratum 281 workaround: adjust northbridge crossbar (XCS) token counts
 * for ganged links on affected (B0/B1) or mixed-revision server systems. */
static void AMD_Errata281(u8 node, u32 revision, u32 platform)
	/* Workaround for Transaction Scheduling Conflict in
	 * Northbridge Cross Bar. Implement XCS Token adjustment
	 * for ganged links. Also, perform fix up for the mixed
	 */
	u8 nodes = get_nodes();
	if (platform & AMD_PTYPE_SVR) {
		/* For each node we need to check for a "broken" node */
		if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
			for (i = 0; i < nodes; i++) {
				if (mctGetLogicalCPUID(i) &
				    (AMD_DR_B0 | AMD_DR_B1)) {
	/* Apply when this node is B0/B1, or a B0/B1 node exists anywhere. */
	if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
		/* F0X68[22:21] DsNpReqLmt0 = 01b */
		val = pci_read_config32(NODE_PCI(node, 0), 0x68);
		pci_write_config32(NODE_PCI(node, 0), 0x68, val);
		val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
		pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
		val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
		pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
		/* F3X144[3:0] RspTok = 0001b */
		val = pci_read_config32(NODE_PCI(node, 3), 0x144);
		pci_write_config32(NODE_PCI(node, 3), 0x144, val);
		/* Same RspTok fixup for F3x148, F3x14C, F3x150. */
		for (i = 0; i < 3; i++) {
			reg = 0x148 + (i * 4);
			val = pci_read_config32(NODE_PCI(node, 3), reg);
			pci_write_config32(NODE_PCI(node, 3), reg, val);
/* Erratum 298 workaround: on B0/B1/B2 revisions set the TLB workaround
 * bits; additionally flag the OS-visible-workaround MSRs (also on B3). */
static void AMD_Errata298(void)
	/* Workaround for L2 Eviction May Occur during operation to
	 * set Accessed or dirty bit.
	 */
	u8 nodes = get_nodes();
	/* For each core we need to check for a "broken" node */
	for (i = 0; i < nodes; i++) {
		if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
	msr.lo |= 0x08;	/* Set TlbCacheDis bit[3] */
	msr.lo |= 0x02;	/* Set TlbForceMemTypeUc bit[1] */
	/* Tell the OS a workaround is in effect (OSVW mechanism). */
	msr = rdmsr(OSVW_ID_Length);
	msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
	wrmsr(OSVW_ID_Length, msr);
	msr = rdmsr(OSVW_Status);
	msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
	wrmsr(OSVW_Status, msr);
	/* Unaffected B3 parts still advertise the OSVW id length. */
	if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
		msr = rdmsr(OSVW_ID_Length);
		msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
		wrmsr(OSVW_ID_Length, msr);
/* Map the build-time SYSTEM_TYPE to the AMD_PTYPE_* platform flag mask
 * used to filter the MSR/PCI/HT-phy default tables. */
static u32 get_platform_type(void)
	switch (SYSTEM_TYPE) {
		ret |= AMD_PTYPE_DSK;	/* desktop */
		ret |= AMD_PTYPE_MOB;	/* mobile */
		ret |= AMD_PTYPE_SVR;	/* server */
	/* FIXME: add UMA support. */
	/* All Fam10 are multi core */
/* Program the PSI_L VID threshold (F3xA0, low 7 bits) from the lowest
 * enabled P-state's VID, on mobile/desktop platforms only. */
static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
	if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
		/* The following code sets the PSIVID to the lowest support P state
		 * assuming that the VID for the lowest power state is below
		 * the VDD voltage regulator threshold. (This also assumes that there
		 * is a Pstate lower than P0)
		 */
		for (i = 4; i >= 0; i--) {	/* walk P-states from lowest (P4) up */
			msr = rdmsr(PS_REG_BASE + i);
			if (msr.hi & PS_EN_MASK) {	/* P-state enabled? */
				/* NOTE(review): NODE_PCI(i, 3) uses the P-state
				 * index, not the 'node' parameter — looks
				 * suspicious; confirm intended target node. */
				dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
				dword |= (msr.lo >> 9) & 0x7F;	/* CpuVid, bits [15:9] */
				pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
/**
 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
 * HT Phy operations are not valid on links that aren't present, so this
 * prevents invalid accesses.
 *
 * Returns the offset of the link register.
 */
static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
	/* get start of CPU HT Host Capabilities */
	val = pci_read_config32(NODE_PCI(node, 0), 0x34);
	val &= 0xFF;	//reg offset of first link
	/* Traverse through the capabilities. */
	reg = pci_read_config32(NODE_PCI(node, 0), val);
	/* Is the capability block a HyperTransport capability block? */
	if ((reg & 0xFF) == 0x08) {
		/* Is the HT capability block an HT Host Capability? */
		if ((reg & 0xE0000000) == (1 << 29))
	val = (reg >> 8) & 0xFF;	//update reg offset (next-capability ptr)
	} while (cap_count && val);
	/* If requested capability found val != 0 */
/**
 * AMD_checkLinkType - Compare desired link characteristics using a logical
 * combination of the HTPHY_LINKTYPE_* flags.
 *
 * Returns the link characteristic mask.
 */
static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
	/* Check connect, init and coherency */
	val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
	linktype |= HTPHY_LINKTYPE_COHERENT;
	linktype |= HTPHY_LINKTYPE_NONCOHERENT;
	/* Link frequency code > 6 means HT3, otherwise HT1. */
	val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
	if (((val >> 8) & 0x0F) > 6)
		linktype |= HTPHY_LINKTYPE_HT3;
	linktype |= HTPHY_LINKTYPE_HT1;
	/* Ganged/unganged from F0x170 + link*4 — presumably the Link
	 * Extended Control register; confirm against the BKDG. */
	val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
	linktype |= HTPHY_LINKTYPE_GANGED;
	linktype |= HTPHY_LINKTYPE_UNGANGED;
/**
 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
 * a phy setting for that link.  'entry' indexes fam10_htphy_default[].
 */
static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
	/* Determine this link's portal */
	phyBase = ((u32) link << 3) | 0x180;
	/* Get the portal control register's initial value
	 * and update it to access the desired phy register
	 */
	phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
	/* Registers above 0x1FF need the direct-map addressing mode. */
	if (fam10_htphy_default[entry].htreg > 0x1FF) {
		phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
		phyReg |= HTPHY_DIRECT_MAP;
		phyReg &= ~HTPHY_OFFSET_MASK;
	/* Now get the current phy register data
	 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
	 */
	phyReg |= fam10_htphy_default[entry].htreg;
	pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
	/* Poll for the read to complete. */
	val = pci_read_config32(NODE_PCI(node, 4), phyBase);
	} while (!(val & HTPHY_IS_COMPLETE_MASK));
	/* Now we have the phy register data, apply the change */
	val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
	val &= ~fam10_htphy_default[entry].mask;
	val |= fam10_htphy_default[entry].data;
	pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
	/* write it through the portal to the phy
	 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
	 */
	phyReg |= HTPHY_WRITE_CMD;
	pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
	/* Poll for the write to complete. */
	val = pci_read_config32(NODE_PCI(node, 4), phyBase);
	} while (!(val & HTPHY_IS_COMPLETE_MASK));
/* Apply the fam10_msr_default[] MSR settings matching this CPU's revision
 * and the platform type, on the current core only. */
void cpuSetAMDMSR(void)
	/* This routine loads the CPU with default settings in fam10_msr_default
	 * table . It must be run after Cache-As-RAM has been enabled, and
	 * Hypertransport initialization has taken place. Also note
	 * that it is run on the current processor only, and only for the current
	 */
	u32 revision, platform;
	printk(BIOS_DEBUG, "cpuSetAMDMSR ");
	revision = mctGetLogicalCPUID(0xFF);	/* 0xFF — presumably "current CPU"; see mctGetLogicalCPUID() */
	platform = get_platform_type();
	for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
		/* Entry applies only if both revision and platform match. */
		if ((fam10_msr_default[i].revision & revision) &&
		    (fam10_msr_default[i].platform & platform)) {
			msr = rdmsr(fam10_msr_default[i].msr);
			msr.hi &= ~fam10_msr_default[i].mask_hi;
			msr.hi |= fam10_msr_default[i].data_hi;
			msr.lo &= ~fam10_msr_default[i].mask_lo;
			msr.lo |= fam10_msr_default[i].data_lo;
			wrmsr(fam10_msr_default[i].msr, msr);
	printk(BIOS_DEBUG, " done\n");
/* Apply the fam10_pci_default[] and fam10_htphy_default[] settings that
 * match this node's revision and the platform type; also sets PSIVID and
 * runs the erratum-281 fixup.  Run for the first core on each node. */
static void cpuSetAMDPCI(u8 node)
	/* This routine loads the CPU with default settings in fam10_pci_default
	 * table . It must be run after Cache-As-RAM has been enabled, and
	 * Hypertransport initialization has taken place. Also note
	 * that it is run for the first core on each node
	 */
	u32 revision, platform;
	printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
	revision = mctGetLogicalCPUID(node);
	platform = get_platform_type();
	AMD_SetupPSIVID_d(platform, node);	/* Set PSIVID offset which is not table driven */
	/* Table-driven PCI config defaults. */
	for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
		if ((fam10_pci_default[i].revision & revision) &&
		    (fam10_pci_default[i].platform & platform)) {
			val = pci_read_config32(NODE_PCI(node,
							 fam10_pci_default[i].
					       fam10_pci_default[i].offset);
			val &= ~fam10_pci_default[i].mask;
			val |= fam10_pci_default[i].data;
			pci_write_config32(NODE_PCI(node,
						    fam10_pci_default[i].
					   fam10_pci_default[i].offset, val);
	/* Table-driven HT phy defaults, applied per present host HT link. */
	for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
		if ((fam10_htphy_default[i].revision & revision) &&
		    (fam10_htphy_default[i].platform & platform)) {
			/* HT Phy settings either apply to both sublinks or have
			 * separate registers for sublink zero and one, so there
			 * will be two table entries. So, here we only loop
			 * through the sublink zeros in function zero.
			 */
			for (j = 0; j < 4; j++) {
				if (AMD_CpuFindCapability(node, j, &offset)) {
					if (AMD_checkLinkType(node, j, offset)
					    & fam10_htphy_default[i].linktype) {
						AMD_SetHtPhyRegister(node, j,
					/* No more capabilities,
					 */
	/* FIXME: add UMA support and programXbarToSriReg(); */
	AMD_Errata281(node, revision, platform);
	/* FIXME: if the dct phy doesn't init correct it needs to reset.
	   if (revision & (AMD_DR_B2 | AMD_DR_B3))
	 */
	printk(BIOS_DEBUG, " done\n");
/* Clear the MCA status registers on the running core.  Cold boot only:
 * after a warm reset they may hold valid error data. */
static void cpuInitializeMCA(void)
	/* Clears Machine Check Architecture (MCA) registers, which power on
	 * containing unknown data, on currently running processor.
	 * This routine should only be executed on initial power on (cold boot),
	 * not across a warm reset because valid data is present at that time.
	 */
	if (cpuid_edx(1) & 0x4080) {	/* MCE and MCA (edx[7] and edx[14]) */
		msr = rdmsr(MCG_CAP);
		if (msr.lo & MCG_CTL_P) {	/* MCG_CTL_P bit is set? */
			/* bank count (MCG_CAP[7:0]) * 4 = offset past last bank */
			msr.lo <<= 2;	/* multiply the count by 4 */
			reg = MC0_STA + msr.lo;
			for (i = 0; i < 4; i++) {
				reg -= 4;	/* Touch status regs for each bank */
941 * finalize_node_setup()
943 * Do any additional post HT init
946 static void finalize_node_setup(struct sys_info *sysinfo)
949 u8 nodes = get_nodes();
952 #if RAMINIT_SYSINFO == 1
953 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
954 reg = pci_read_config32(NODE_HT(0), 0x64);
955 sysinfo->sblk = (reg >> 8) & 7;
957 sysinfo->nodes = nodes;
958 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
961 for (i = 0; i < nodes; i++) {
966 // Prep each node for FID/VID setup.
970 #if CONFIG_MAX_PHYSICAL_CPUS > 1
971 /* Skip the BSP, start at node 1 */
972 for (i = 1; i < nodes; i++) {
973 setup_remote_node(i);