2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 // This code honors the CONFIG_ENABLE_APIC_EXT_ID, CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID options.
23 #ifndef FAM10_SET_FIDVID
24 #define FAM10_SET_FIDVID 1
27 #ifndef FAM10_SET_FIDVID_CORE0_ONLY
28 /* The FIDVID_CTL and FIDVID_STATUS MSRs are shared between cores, so
29    every AP must run the FID/VID setup to reach a common FID/VID. */
30 #define FAM10_SET_FIDVID_CORE0_ONLY 0
/* Log "<strval><val>" as a 2-digit hex value followed by a newline. */
33 static void print_initcpu8 (const char *strval, u8 val)
35 printk(BIOS_DEBUG, "%s%02x\n", strval, val);
/* Same as print_initcpu8() but without the trailing newline ("nocr"). */
38 static void print_initcpu8_nocr (const char *strval, u8 val)
40 printk(BIOS_DEBUG, "%s%02x", strval, val);
/* Log "<strval><val>" as a 4-digit hex value followed by a newline. */
44 static void print_initcpu16 (const char *strval, u16 val)
46 printk(BIOS_DEBUG, "%s%04x\n", strval, val);
/* Log "<strval><val>" as an 8-digit hex value followed by a newline. */
50 static void print_initcpu(const char *strval, u32 val)
52 printk(BIOS_DEBUG, "%s%08x\n", strval, val);
56 void update_microcode(u32 cpu_deviceid);
57 static void prep_fid_change(void);
58 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
59 void cpuSetAMDMSR(void);
/* Enable extended CF8/CFC config access (EnableCf8ExtCfg, NB_CFG[46])
 * so the full CONFIG_PCI_IO_CFG_EXT 4K register range is reachable.
 * Compiled to a no-op stub when CONFIG_PCI_IO_CFG_EXT is disabled.
 */
61 #if CONFIG_PCI_IO_CFG_EXT == 1
62 static void set_EnableCf8ExtCfg(void)
64 // set NB_CFG[46] = 1
66 msr = rdmsr(NB_CFG_MSR);
67 // EnableCf8ExtCfg: needed to access the CONFIG_PCI_IO_CFG_EXT 4K range
68 msr.hi |= (1<<(46-32));
69 wrmsr(NB_CFG_MSR, msr);
72 static void set_EnableCf8ExtCfg(void) { }
77 #define PCI_MMIO_BASE 0xfe000000
78 /* Because GS is used to store the high part, the low part must start
79    at 0, i.e. PCI_MMIO_BASE & 0x00ffffff must be 0. */
/* Program MSR 0xC001_0058 (MMIO Configuration Base Address) so PCI
 * config space can be accessed via MMIO, and mirror the base into the
 * GS_Base MSR for use by the MMIO-config access helpers.
 */
81 static void set_pci_mmio_conf_reg(void)
83 #if CONFIG_MMCONF_SUPPORT
85 msr = rdmsr(0xc0010058);
86 msr.lo &= ~(0xfff00000 | (0xf << 2));  /* clear BusRange and base low bits */
87 // 256 buses per segment, MMIO region will be 4G; enable MMIO config space
88 msr.lo |= ((8+CONFIG_PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
89 msr.hi &= ~(0x0000ffff);
90 msr.hi |= (PCI_MMIO_BASE >> (32-8));
91 wrmsr(0xc0010058, msr); // MMIO Config Base Address Reg
93 // NOTE(review): should an MTRR cover this range? Left as in original:
94 // set_var_mtrr_x(7, PCI_MMIO_BASE<<8, PCI_MMIO_BASE>>(32-8), 0x00000000, 0x01, MTRR_TYPE_UNCACHEABLE);
/* Stash the MMIO config base high bits in GS_Base for fast access. */
98 msr.hi = (PCI_MMIO_BASE >> (32-8));
100 wrmsr(0xc0000101, msr); //GS_Base Reg
/* Callback type invoked once per AP with its APIC ID and a caller cookie. */
108 typedef void (*process_ap_t)(u32 apicid, void *gp);
110 //core_range = 0 : all cores
111 //core_range = 1 : core 0 only
112 //core_range = 2 : cores other than core0
/* Iterate over the APs selected by core_range and invoke process_ap()
 * for each, skipping the BSP itself. APIC IDs are computed from
 * node/core numbering (layout depends on NB_CFG[54]) and are adjusted
 * by CONFIG_APIC_ID_OFFSET when extended APIC IDs are enabled.
 */
114 static void for_each_ap(u32 bsp_apicid, u32 core_range,
115 			process_ap_t process_ap, void *gp)
117 // assumes the OS has not changed our APIC IDs
122 u32 disable_siblings;
126 u32 ApicIdCoreIdSize;
128 /* get_nodes is defined in ht_wrapper.c */
131 disable_siblings = !CONFIG_LOGICAL_CPUS;
133 #if CONFIG_LOGICAL_CPUS == 1
134 if(read_option(CMOS_VSTART_quad_core, CMOS_VLEN_quad_core, 0) != 0) { // CMOS 0 means quad core enabled
135 disable_siblings = 1;
139 /* Assume all nodes are the same stepping; otherwise we could use
140 nb_cfg_54 from the BSP for all nodes */
141 nb_cfg_54 = read_nb_cfg_54();
/* CPUID 8000_0008h ECX[15:12] = ApicIdCoreIdSize (bits of core ID). */
143 ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
144 if(ApicIdCoreIdSize) {
145 siblings = ((1 << ApicIdCoreIdSize) - 1);
147 siblings = 3; //quad core fallback when CPUID reports 0
150 for (i = 0; i < nodes; i++) {
151 cores_found = get_core_num_in_bsp(i);
155 if (core_range == 2) {
161 if (disable_siblings || (core_range==1)) {
168 for (j = jstart; j <= jend; j++) {
/* NB_CFG[54] set: APIC ID = node*(cores per node) + core;
 * clear: cores are spaced 64 apart — NOTE(review): confirm against BKDG. */
169 ap_apicid = i * (nb_cfg_54 ? (siblings + 1):1) + j * (nb_cfg_54 ? 1:64);
171 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
172 #if CONFIG_LIFT_BSP_APIC_ID == 0
173 if( (i != 0) || (j != 0)) /* except bsp */
175 ap_apicid += CONFIG_APIC_ID_OFFSET;
178 if(ap_apicid == bsp_apicid) continue; /* never run the callback on the BSP */
180 process_ap(ap_apicid, gp);
186 /* FIXME: Duplicate of what is in lapic.h? */
/* Read a remote core's LAPIC register via the ICR remote-read protocol.
 * Returns through *pvalue on success (status == LAPIC_ICR_RR_VALID);
 * both wait loops are bounded by a 1000-iteration timeout.
 */
187 static int lapic_remote_read(int apicid, int reg, u32 *pvalue)
192 lapic_wait_icr_idle();
193 lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
194 lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
/* Wait for the ICR send to complete. */
198 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
199 } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
/* Wait for the remote read data to become valid. */
203 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
204 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
208 if (status == LAPIC_ICR_RR_VALID) {
209 *pvalue = lapic_read(LAPIC_RRR);
216 /* Use the LAPIC timer count register to hold each cores init status */
217 #define LAPIC_MSG_REG 0x380
220 #if FAM10_SET_FIDVID == 1
221 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
/* Debug helper: print a label plus the APIC/node/core IDs of a CPU. */
224 static inline __attribute__((always_inline)) void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id, const char *str)
226 printk(BIOS_DEBUG, "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str, apicid, id.nodeid, id.coreid);
/* Poll the given AP's LAPIC_MSG_REG mailbox (via remote read) until its
 * low 6 state bits equal 'state'. Returns a timeout indicator to the
 * caller — NOTE(review): exact return values are outside this view.
 */
230 static unsigned wait_cpu_state(u32 apicid, u32 state)
236 if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0) continue;
237 if ((readback & 0x3f) == state) {
239 break; //target cpu has reached the requested stage
/* for_each_ap() callback: wait for one AP to post the "started" state
 * (0x13) in its mailbox, logging a message on success or timeout.
 */
252 static void wait_ap_started(u32 ap_apicid, void *gp )
255 timeout = wait_cpu_state(ap_apicid, 0x13); // started
257 print_initcpu8_nocr("* AP ", ap_apicid);
258 print_initcpu(" didn't start timeout:", timeout);
261 print_initcpu8_nocr("AP started: ", ap_apicid);
/* BSP-side: wait until every AP other than the core0s (core_range = 2)
 * has reported the started state.
 */
266 static void wait_all_other_cores_started(u32 bsp_apicid)
268 // all APs other than core0
269 print_debug("started ap apicid: ");
270 for_each_ap(bsp_apicid, 2 , wait_ap_started, (void *)0);
/* Called by the BSP: broadcast state 0x14 through the BSP's own mailbox
 * to signal that APs are allowed to stop.
 */
275 static void allow_all_aps_stop(u32 bsp_apicid)
277 /* Called by the BSP to indicate APs can stop */
279 /* FIXME Do APs use this?
280 Looks like wait_till_sysinfo_in_ram is used instead. */
282 // allow aps to stop; use 6 bits for state
283 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
/* Enable extended APIC IDs on the given node by setting the APIC
 * extended spur/ID/broadcast bits in HT Transaction Control (F0x68).
 */
286 static void enable_apic_ext_id(u32 node)
290 val = pci_read_config32(NODE_HT(node), 0x68);
291 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
292 pci_write_config32(NODE_HT(node), 0x68, val);
/* Tear down Cache-As-RAM on an AP and halt it: restore the L2-IC to L3
 * connection bit that CAR setup had disabled, then disable CAR.
 */
296 static void STOP_CAR_AND_CPU(void)
300 /* Re-enable L2 IC to L3 connection (was disabled only for CAR) */
301 msr = rdmsr(BU_CFG2);
302 msr.lo &= ~(1 << ClLinesToNbDis);
305 disable_cache_as_ram(); // inline
/* Early per-CPU initialization, run by every core out of Cache-As-RAM.
 * Sets up MMIO config access, determines node/core/APIC IDs (optionally
 * lifting APIC IDs by CONFIG_APIC_ID_OFFSET), marks the core as started
 * in its LAPIC mailbox, and on APs loads microcode, runs FID/VID setup
 * (stage2 on warm reset, stage1 otherwise) and parks the core.
 * Returns — NOTE(review): presumably the BSP APIC ID; confirm upstream.
 */
309 #if RAMINIT_SYSINFO == 1
310 static u32 init_cpus(u32 cpu_init_detectedx ,struct sys_info *sysinfo)
312 static u32 init_cpus(u32 cpu_init_detectedx)
317 struct node_core_id id;
320 * early MTRRs were already set in cache_as_ram.inc
323 /* enable access to pci conf via mmio */
324 set_pci_mmio_conf_reg();
326 /* that is from the initial apicid; we need nodeid and coreid
328 id = get_node_core_id_x();
330 /* NB_CFG MSR is shared between cores, so we need to make sure
331 core0 is done first --- use wait_all_core0_started */
333 set_apicid_cpuid_lo(); /* only set it on core0 */
334 set_EnableCf8ExtCfg(); /* only set it on core0 */
335 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
336 enable_apic_ext_id(id.nodeid);
/* Optionally lift this core's APIC ID by CONFIG_APIC_ID_OFFSET. */
343 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
344 u32 initial_apicid = get_initial_apicid();
346 #if CONFIG_LIFT_BSP_APIC_ID == 0
347 if( initial_apicid != 0 ) // other than bsp
350 /* use initial apic id to lift it */
351 u32 dword = lapic_read(LAPIC_ID);
352 dword &= ~(0xff << 24);
353 dword |= (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
355 lapic_write(LAPIC_ID, dword);
358 #if CONFIG_LIFT_BSP_APIC_ID == 1
359 bsp_apicid += CONFIG_APIC_ID_OFFSET;
364 /* get the apicid; it may have been lifted already */
367 // show our apicid, nodeid, and coreid
369 if (id.nodeid!=0) //all core0 except bsp
370 print_apicid_nodeid_coreid(apicid, id, " core0: ");
372 else { //all other cores
373 print_apicid_nodeid_coreid(apicid, id, " corex: ");
/* An INIT while already running means something reset us: soft reset. */
377 if (cpu_init_detectedx) {
378 print_apicid_nodeid_coreid(apicid, id, "\n\n\nINIT detected from ");
379 print_debug("\nIssuing SOFT_RESET...\n");
384 if(!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
385 distinguish_cpu_resets(id.nodeid); // Also indicates we are started
388 // Mark the core as started (state 0x13) in the LAPIC mailbox.
389 lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
/* Everything below runs on APs only; the BSP takes its own path. */
392 if(apicid != bsp_apicid) {
393 /* Set up each AP core's MSRs.
394 * This happens after HT init.
395 * The BSP runs this code on its own path.
397 update_microcode(cpuid_eax(1));
401 #if FAM10_SET_FIDVID == 1
402 #if (CONFIG_LOGICAL_CPUS == 1) && (FAM10_SET_FIDVID_CORE0_ONLY == 1)
403 // Run on all APs for proper FID/VID setup.
404 if(id.coreid == 0 ) // only need to set fid for core0
407 // check warm (BIOS) reset to call stage2, otherwise do stage1
408 if (warm_reset_detect(id.nodeid)) {
409 printk(BIOS_DEBUG, "init_fidvid_stage2 apicid: %02x\n", apicid);
410 init_fidvid_stage2(apicid, id.nodeid);
412 printk(BIOS_DEBUG, "init_fidvid_ap(stage1) apicid: %02x\n", apicid);
413 init_fidvid_ap(bsp_apicid, apicid, id.nodeid, id.coreid);
418 /* AP is ready; wait for the BSP to get memory configured */
419 /* FIXME: many cores spinning on a node0 pci register seems to be bad.
420 * Why do we need to wait? These APs are just going to go sit in a hlt.
422 //wait_till_sysinfo_in_ram();
424 set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
/* Reaching this line means the AP failed to halt — loud debug marker. */
427 printk(BIOS_DEBUG, "\nAP %02x should be halted but you are reading this....\n", apicid);
/* Return non-zero once core0 of the given node has signalled start by
 * way of the cold-reset-detect bit in HT_INIT_CONTROL (F0x6C).
 */
434 static u32 is_core0_started(u32 nodeid)
438 device = NODE_PCI(nodeid, 0);
439 htic = pci_read_config32(device, HT_INIT_CONTROL);
440 htic &= HTIC_ColdR_Detect;
/* BSP-side: spin until core0 of every non-BSP node reports started. */
445 static void wait_all_core0_started(void)
447 /* When core0 is started, it will distinguish_cpu_resets
448 . So wait for that to finish */
450 u32 nodes = get_nodes();
452 printk(BIOS_DEBUG, "Wait all core0s started \n");
453 for(i=1;i<nodes;i++) { // skip the BSP, because this code is running on it
454 while(!is_core0_started(i)) {}  /* unbounded spin — relies on the node coming up */
455 print_initcpu8(" Core0 started on node: ", i);
457 printk(BIOS_DEBUG, "Wait all core0s started done\n");
459 #if CONFIG_MAX_PHYSICAL_CPUS > 1
461 * void start_node(u32 node)
463 * start core0 in the node, so it can generate HT packets to feature code.
465 * This function starts the AP nodes' core0s. wait_all_core0_started() in
466 * romstage.c waits for all the APs to be finished before continuing
469 static void start_node(u8 node)
473 /* Enable routing table */
474 printk(BIOS_DEBUG, "Start node %02x", node);
476 #if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
477 /* For FAM10 support, we need to set Dram base/limit for the new node */
478 pci_write_config32(NODE_MP(node), 0x44, 0);   /* DRAM limit = 0 */
479 pci_write_config32(NODE_MP(node), 0x40, 3);   /* DRAM base: RE/WE enabled */
482 /* Allow APs to make requests (ROM fetch) */
483 val=pci_read_config32(NODE_HT(node), 0x6c);
485 pci_write_config32(NODE_HT(node), 0x6c, val);
487 printk(BIOS_DEBUG, " done.\n");
492 * static void setup_remote_node(u32 node)
494 * Copy the BSP Address Map to each AP.
496 static void setup_remote_node(u8 node)
498 /* These registers can be used with the F1x114_x Address Map at the
499 same time, so they must be set even for 32-node systems */
500 static const u16 pci_reg[] = {
501 /* DRAM Base/Limits Registers */
502 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
503 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
504 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
505 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
506 /* MMIO Base/Limits Registers */
507 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
508 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
509 /* IO Base/Limits Registers */
510 0xc4, 0xcc, 0xd4, 0xdc,
511 0xc0, 0xc8, 0xd0, 0xd8,
512 /* Configuration Map Registers */
513 0xe0, 0xe4, 0xe8, 0xec,
517 printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
519 /* copy the default resource map from node 0 */
520 for(i = 0; i < ARRAY_SIZE(pci_reg); i++) {
524 value = pci_read_config32(NODE_MP(0), reg);
525 pci_write_config32(NODE_MP(node), reg, value);
528 printk(BIOS_DEBUG, " done\n");
/* AMD Fam10h Erratum 281 workaround: adjust XCS tokens for ganged links
 * and fix up mixed-revision (DR-B0/B1 with later) configurations.
 * Applied when this node is B0/B1 or any node in the system is.
 */
532 static void AMD_Errata281(u8 node, u32 revision, u32 platform)
534 /* Workaround for Transaction Scheduling Conflict in
535 * Northbridge Cross Bar. Implement XCS Token adjustment
536 * for ganged links. Also, perform fix up for the mixed
543 u8 nodes = get_nodes();
545 if (platform & AMD_PTYPE_SVR) {
546 /* For each node we need to check for a "broken" node */
547 if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
548 for (i = 0; i < nodes; i++) {
549 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1)) {
556 if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
558 /* F0X68[22:21] DsNpReqLmt0 = 01b */
559 val = pci_read_config32(NODE_PCI(node, 0), 0x68);
562 pci_write_config32(NODE_PCI(node, 0), 0x68, val);
/* F3x6C / F3x7C: XCS buffer-count adjustments (values not in view). */
565 val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
568 pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
571 val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
574 pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
576 /* F3X144[3:0] RspTok = 0001b */
577 val = pci_read_config32(NODE_PCI(node, 3), 0x144);
580 pci_write_config32(NODE_PCI(node, 3), 0x144, val);
/* F3x148/14C/150: per-link XCS token registers. */
582 for (i = 0; i < 3; i++) {
583 reg = 0x148 + (i * 4);
584 val = pci_read_config32(NODE_PCI(node, 3), reg);
587 pci_write_config32(NODE_PCI(node, 3), reg, val);
/* AMD Fam10h Erratum 298 workaround ("L2 Eviction May Occur During
 * Processor Operation To Set Accessed or Dirty Bit"): on affected
 * revisions (DR-B0/B1/B2) set TlbCacheDis and TlbForceMemTypeUc and
 * advertise the OS Visible Workaround via the OSVW MSRs; on B3 only
 * the OSVW length is announced (status bit stays clear).
 */
594 static void AMD_Errata298(void)
596 /* Workaround for L2 Eviction May Occur during operation to
597 * set Accessed or dirty bit.
603 u8 nodes = get_nodes();
605 /* For each core we need to check for a "broken" node */
606 for (i = 0; i < nodes; i++) {
607 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
615 msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
619 msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
622 msr = rdmsr(OSVW_ID_Length);
623 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
624 wrmsr(OSVW_ID_Length, msr);
626 msr = rdmsr(OSVW_Status);
627 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
628 wrmsr(OSVW_Status, msr);
/* B3 parts: announce the OSVW slot without setting the status bit. */
631 if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
632 msr = rdmsr(OSVW_ID_Length);
633 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
634 wrmsr(OSVW_ID_Length, msr);
/* Map the build-time SYSTEM_TYPE to an AMD_PTYPE_* platform flag mask
 * (desktop / mobile / server). Multi-core is implied on Fam10.
 */
640 u32 get_platform_type(void)
644 switch(SYSTEM_TYPE) {
646 ret |= AMD_PTYPE_DSK;
649 ret |= AMD_PTYPE_MOB;
652 ret |= AMD_PTYPE_SVR;
658 /* FIXME: add UMA support. */
660 /* All Fam10 are multi core */
/* Program the PSI_L VID threshold (F3xA0) on mobile/desktop platforms
 * from the VID of the lowest-power enabled P-state, scanning P-states
 * from highest index (P4) down.
 */
667 void AMD_SetupPSIVID_d (u32 platform_type, u8 node)
673 if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
675 /* The following code sets the PSIVID to the lowest supported P state
676 * assuming that the VID for the lowest power state is below
677 * the VDD voltage regulator threshold. (This also assumes that there
678 * is a Pstate lower than P0)
681 for( i = 4; i >= 0; i--) {
682 msr = rdmsr(PS_REG_BASE + i);
684 if (msr.hi & PS_EN_MASK) {  /* P-state enabled? */
685 dword = pci_read_config32(NODE_PCI(i,3), 0xA0);
687 dword |= (msr.lo >> 9) & 0x7F;  /* CpuVid field of the P-state MSR */
688 pci_write_config32(NODE_PCI(i,3), 0xA0, dword);
697 * AMD_CpuFindCapability - Traverse the PCI capability list to find host HT links.
698 * HT Phy operations are not valid on links that aren't present, so this
699 * prevents invalid accesses.
701 * Returns the offset of the link register through *offset; the
/* cap_count selects which HT Host capability (0-based) to return. */
703 BOOL AMD_CpuFindCapability (u8 node, u8 cap_count, u8 *offset)
708 /* get start of CPU HT Host Capabilities */
709 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
710 val &= 0xFF; //reg offset of first capability
714 /* Traverse through the capabilities. */
716 reg = pci_read_config32(NODE_PCI(node, 0), val);
717 /* Is the capability block a HyperTransport capability block? (ID 0x08) */
718 if ((reg & 0xFF) == 0x08) {
719 /* Is the HT capability block an HT Host Capability? */
720 if ((reg & 0xE0000000) == (1 << 29))
725 val = (reg >> 8) & 0xFF; //follow the next-capability pointer
726 } while (cap_count && val);
730 /* If requested capability found val != 0 */
739 * AMD_checkLinkType - Compare desired link characteristics using a logical
742 * Returns the link characteristic mask (HTPHY_LINKTYPE_* flags).
744 u32 AMD_checkLinkType (u8 node, u8 link, u8 regoff)
749 /* Check connect, init and coherency (Link Type register at regoff+0x18) */
750 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
754 linktype |= HTPHY_LINKTYPE_COHERENT;
757 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
/* Link frequency (regoff+0x08 bits [11:8]): > 6 encodes HT3 speeds. */
761 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
763 if (((val >> 8) & 0x0F) > 6)
764 linktype |= HTPHY_LINKTYPE_HT3;
766 linktype |= HTPHY_LINKTYPE_HT1;
/* Ganged/unganged state from the Link Extended Control reg (F0x170+). */
770 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
773 linktype |= HTPHY_LINKTYPE_GANGED;
775 linktype |= HTPHY_LINKTYPE_UNGANGED;
782 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
783 * a phy setting for that link, applying fam10_htphy_default[entry]
/* (read-modify-write through the F4x180+ portal, polling for complete). */
785 void AMD_SetHtPhyRegister (u8 node, u8 link, u8 entry)
791 /* Determine this link's portal */
795 phyBase = ((u32)link << 3) | 0x180;
798 /* Get the portal control register's initial value
799 * and update it to access the desired phy register
801 phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
/* Offsets above 0x1FF require direct-map addressing mode. */
803 if (fam10_htphy_default[entry].htreg > 0x1FF) {
804 phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
805 phyReg |= HTPHY_DIRECT_MAP;
807 phyReg &= ~HTPHY_OFFSET_MASK;
810 /* Now get the current phy register data
811 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
813 phyReg |= fam10_htphy_default[entry].htreg;
814 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Poll until the portal signals the read is complete. */
817 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
818 } while (!(val & HTPHY_IS_COMPLETE_MASK));
820 /* Now we have the phy register data, apply the change */
821 val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
822 val &= ~fam10_htphy_default[entry].mask;
823 val |= fam10_htphy_default[entry].data;
824 pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
826 /* write it through the portal to the phy
827 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
829 phyReg |= HTPHY_WRITE_CMD;
830 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Poll until the portal signals the write is complete. */
833 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
834 } while (!(val & HTPHY_IS_COMPLETE_MASK));
/* Apply the fam10_msr_default table to the current core: for each entry
 * matching this CPU's revision and platform, read-modify-write the MSR
 * using the entry's mask/data pairs.
 */
838 void cpuSetAMDMSR(void)
840 /* This routine loads the CPU with default settings in the fam10_msr_default
841 * table. It must be run after Cache-As-RAM has been enabled, and
842 * Hypertransport initialization has taken place. Also note
843 * that it is run on the current processor only, and only for the current
848 u32 revision, platform;
850 printk(BIOS_DEBUG, "cpuSetAMDMSR ");
852 revision = mctGetLogicalCPUID(0xFF);  /* 0xFF = current node */
853 platform = get_platform_type();
855 for(i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
856 if ((fam10_msr_default[i].revision & revision) &&
857 (fam10_msr_default[i].platform & platform)) {
858 msr = rdmsr(fam10_msr_default[i].msr);
859 msr.hi &= ~fam10_msr_default[i].mask_hi;
860 msr.hi |= fam10_msr_default[i].data_hi;
861 msr.lo &= ~fam10_msr_default[i].mask_lo;
862 msr.lo |= fam10_msr_default[i].data_lo;
863 wrmsr(fam10_msr_default[i].msr, msr);
868 printk(BIOS_DEBUG, " done\n");
/* Apply the fam10_pci_default and fam10_htphy_default tables to one
 * node: PCI config read-modify-writes filtered by revision/platform,
 * then HT PHY settings on each present host link, plus PSIVID setup
 * and the Erratum 281 workaround.
 */
872 void cpuSetAMDPCI(u8 node)
874 /* This routine loads the CPU with default settings in the fam10_pci_default
875 * table. It must be run after Cache-As-RAM has been enabled, and
876 * Hypertransport initialization has taken place. Also note
877 * that it is run for the first core on each node
880 u32 revision, platform;
884 printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
887 revision = mctGetLogicalCPUID(node);
888 platform = get_platform_type();
890 AMD_SetupPSIVID_d(platform, node); /* Set PSIVID offset which is not table driven */
892 for(i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
893 if ((fam10_pci_default[i].revision & revision) &&
894 (fam10_pci_default[i].platform & platform)) {
895 val = pci_read_config32(NODE_PCI(node,
896 fam10_pci_default[i].function),
897 fam10_pci_default[i].offset);
898 val &= ~fam10_pci_default[i].mask;
899 val |= fam10_pci_default[i].data;
900 pci_write_config32(NODE_PCI(node,
901 fam10_pci_default[i].function),
902 fam10_pci_default[i].offset, val);
906 for(i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
907 if ((fam10_htphy_default[i].revision & revision) &&
908 (fam10_htphy_default[i].platform & platform)) {
909 /* HT Phy settings either apply to both sublinks or have
910 * separate registers for sublink zero and one, so there
911 * will be two table entries. So, here we only loop
912 * through the sublink zeros in function zero.
914 for (j = 0; j < 4; j++) {
915 if (AMD_CpuFindCapability(node, j, &offset)) {
916 if (AMD_checkLinkType(node, j, offset)
917 & fam10_htphy_default[i].linktype) {
918 AMD_SetHtPhyRegister(node, j, i);
921 /* No more capabilities,
930 /* FIXME: add UMA support and programXbarToSriReg(); */
932 AMD_Errata281(node, revision, platform);
934 /* FIXME: if the dct phy doesn't init correctly it needs to reset.
935 if (revision & (AMD_DR_B2 | AMD_DR_B3))
938 printk(BIOS_DEBUG, " done\n");
/* Clear the Machine Check status registers (MCi_STATUS) on the current
 * core. Cold boot only: after a warm reset the registers hold valid
 * error information that must not be wiped.
 */
942 void cpuInitializeMCA(void)
944 /* Clears Machine Check Architecture (MCA) registers, which power on
945 * containing unknown data, on currently running processor.
946 * This routine should only be executed on initial power on (cold boot),
947 * not across a warm reset because valid data is present at that time.
954 if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
955 msr = rdmsr(MCG_CAP);
956 if (msr.lo & MCG_CTL_P){ /* MCG_CTL_P bit is set? */
/* MCG_CAP[7:0] = bank count; each bank occupies 4 MSRs. */
959 msr.lo <<= 2; /* multiply the count by 4 */
960 reg = MC0_STA + msr.lo;
962 for (i=0; i < 4; i++) {
964 reg -=4; /* Touch status regs for each bank */
972 * finalize_node_setup()
974 * Do any additional post HT init
977 void finalize_node_setup(struct sys_info *sysinfo)
980 u8 nodes = get_nodes();
983 #if RAMINIT_SYSINFO == 1
984 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
985 reg = pci_read_config32(NODE_HT(0), 0x64);
986 sysinfo->sblk = (reg>>8) & 7;
988 sysinfo->nodes = nodes;
989 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
993 for (i = 0; i < nodes; i++) {
997 #if FAM10_SET_FIDVID == 1
998 // Prep each node for FID/VID setup.
1002 #if CONFIG_MAX_PHYSICAL_CPUS > 1
1003 /* Skip the BSP, start at node 1 */
1004 for(i=1; i<nodes; i++) {
1005 setup_remote_node(i);