2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 // Honors CONFIG_ENABLE_APIC_EXT_ID, CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID.
27 #ifndef SET_FIDVID_CORE0_ONLY
28 /* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
29 Need to do every AP to set common FID/VID */
30 #define SET_FIDVID_CORE0_ONLY 0
33 static void prep_fid_change(void);
34 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
35 void cpuSetAMDMSR(void);
37 #if CONFIG_PCI_IO_CFG_EXT == 1
/* Enable extended CF8/CFC PCI config access by setting NB_CFG MSR
 * bit 46 (EnableCf8ExtCfg) on the currently running core. */
38 static void set_EnableCf8ExtCfg(void)
40 // set the NB_CFG[46]=1;
42 msr = rdmsr(NB_CFG_MSR);
43 // EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
/* Bit 46 lives in the high dword of the 64-bit MSR, hence (46 - 32). */
44 msr.hi |= (1 << (46 - 32));
45 wrmsr(NB_CFG_MSR, msr);
/* No-op stub when extended PCI I/O config access is disabled. */
48 static void set_EnableCf8ExtCfg(void) { }
52 #define PCI_MMIO_BASE 0xfe000000
53 /* because we will use gs to store hi, so need to make sure lo can start
54 from 0, So PCI_MMIO_BASE & 0x00ffffff should be equal to 0 */
/* Program the MMIO configuration space base (MSR 0xC001_0058) and also
 * mirror the base into GS_Base (MSR 0xC000_0101) so later code can
 * recover it via the GS segment. Active only with CONFIG_MMCONF_SUPPORT. */
56 static void set_pci_mmio_conf_reg(void)
58 #if CONFIG_MMCONF_SUPPORT
60 msr = rdmsr(0xc0010058);
/* Clear the old base address (lo[31:20]) and bus-range field (lo[5:2]). */
61 msr.lo &= ~(0xfff00000 | (0xf << 2));
62 // 256 bus per segment, MMIO reg will be 4G , enable MMIO Config space
/* BusRange = 8 + segment bits (2^8 = 256 buses per segment); bit 0 = enable. */
63 msr.lo |= ((8 + CONFIG_PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
64 msr.hi &= ~(0x0000ffff);
65 msr.hi |= (PCI_MMIO_BASE >> (32 - 8));
66 wrmsr(0xc0010058, msr); // MMIO Config Base Address Reg
68 //mtrr for that range?
69 // set_var_mtrr_x(7, PCI_MMIO_BASE<<8, PCI_MMIO_BASE>>(32-8), 0x00000000, 0x01, MTRR_TYPE_UNCACHEABLE);
73 msr.hi = (PCI_MMIO_BASE >> (32 - 8));
75 wrmsr(0xc0000101, msr); //GS_Base Reg
80 typedef void (*process_ap_t) (u32 apicid, void *gp);
82 //core_range = 0 : all cores
83 //core range = 1 : core 0 only
84 //core range = 2 : cores other than core0
/* Iterate over the APs selected by core_range and call process_ap() with
 * each AP's APIC ID plus the caller-supplied cookie gp. The BSP itself
 * is always skipped (see the bsp_apicid comparison below). */
86 static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
89 // here assume the OS don't change our apicid
100 /* get_nodes defined in ht_wrapper.c */
103 disable_siblings = !CONFIG_LOGICAL_CPUS;
/* CMOS option may force single-core operation even with logical CPUs on. */
105 #if CONFIG_LOGICAL_CPUS == 1 && CONFIG_HAVE_OPTION_TABLE == 1
106 if (read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 mean multi core
107 disable_siblings = 1;
111 /* Assume that all nodes are the same stepping, otherwise we could use
112 nb_cfg_54 from the bsp for all nodes */
113 nb_cfg_54 = read_nb_cfg_54();
/* CPUID 8000_0008 ECX[15:12] = ApicIdCoreIdSize: number of APIC ID bits
 * reserved for the core ID; zero falls back to the quad-core default. */
115 ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
116 if (ApicIdCoreIdSize) {
117 siblings = ((1 << ApicIdCoreIdSize) - 1);
119 siblings = 3; //quad core
122 for (i = 0; i < nodes; i++) {
123 cores_found = get_core_num_in_bsp(i);
127 if (core_range == 2) {
133 if (disable_siblings || (core_range == 1)) {
139 for (j = jstart; j <= jend; j++) {
/* APIC ID layout depends on NB_CFG[54]: node-major when set,
 * core-major with stride 64 when clear. */
141 i * (nb_cfg_54 ? (siblings + 1) : 1) +
142 j * (nb_cfg_54 ? 1 : 64);
144 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
145 #if CONFIG_LIFT_BSP_APIC_ID == 0
146 if ((i != 0) || (j != 0)) /* except bsp */
148 ap_apicid += CONFIG_APIC_ID_OFFSET;
/* Never invoke the callback for the BSP itself. */
151 if (ap_apicid == bsp_apicid)
154 process_ap(ap_apicid, gp);
/* Read a remote core's LAPIC register via the REMRD inter-processor
 * message; on success the value is stored in *pvalue. Both waits are
 * bounded by a simple poll-count timeout rather than blocking forever. */
160 static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
165 lapic_wait_icr_idle();
166 lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
167 lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
169 /* Extra busy check compared to lapic.h */
172 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
173 } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
/* Wait for the remote read to leave the in-progress state. */
177 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
178 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
/* Only a VALID completion yields data from the remote read register. */
182 if (status == LAPIC_ICR_RR_VALID) {
183 *pvalue = lapic_read(LAPIC_RRR);
189 /* Use the LAPIC timer count register to hold each cores init status */
190 #define LAPIC_MSG_REG 0x380
193 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
/* Debug helper: print a core's APIC ID, node ID and core ID with a
 * caller-provided tag string. Forced inline for the CAR environment. */
196 static inline __attribute__ ((always_inline))
197 void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
201 "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
202 apicid, id.nodeid, id.coreid);
/* Poll the target AP's LAPIC_MSG_REG mailbox until its low 6 state bits
 * equal the requested state. The return value distinguishes success from
 * timeout (see the callers, e.g. wait_ap_started). */
205 static u32 wait_cpu_state(u32 apicid, u32 state)
211 if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
/* Only bits [5:0] carry the state; the upper bits hold the APIC ID. */
213 if ((readback & 0x3f) == state) {
215 break; //target cpu is in stage started
/* for_each_ap() callback: block until one AP reports state 0x13
 * ("started") in its mailbox, logging either success or timeout. */
227 static void wait_ap_started(u32 ap_apicid, void *gp)
230 timeout = wait_cpu_state(ap_apicid, 0x13); // started
231 printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
233 printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
235 printk(BIOS_DEBUG, "started\n");
/* Wait for every AP except the per-node core0s (core_range = 2) to
 * reach the "started" state. */
239 static void wait_all_other_cores_started(u32 bsp_apicid)
241 // all aps other than core0
242 printk(BIOS_DEBUG, "started ap apicid: ");
243 for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
244 printk(BIOS_DEBUG, "\n");
/* BSP writes state 0x14 ("stop allowed") into its own mailbox so that
 * APs polling it may proceed to halt. */
247 static void allow_all_aps_stop(u32 bsp_apicid)
249 /* Called by the BSP to indicate AP can stop */
251 /* FIXME Do APs use this?
252 Looks like wait_till_sysinfo_in_ram is used instead. */
254 // allow aps to stop use 6 bits for state
255 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
/* Turn on extended (8-bit) APIC ID handling in the node's HT
 * Transaction Control register (function 0, offset 0x68). */
258 static void enable_apic_ext_id(u32 node)
262 val = pci_read_config32(NODE_HT(node), 0x68);
263 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
264 pci_write_config32(NODE_HT(node), 0x68, val);
/* Tear down Cache-As-RAM on this AP and then stop the core; does not
 * return on the normal path. */
267 static void STOP_CAR_AND_CPU(void)
271 /* Disable L2 IC to L3 connection (Only for CAR) */
272 msr = rdmsr(BU_CFG2);
273 msr.lo &= ~(1 << ClLinesToNbDis);
276 disable_cache_as_ram(); // inline
277 /* stop all cores except node0/core0 the bsp .... */
281 #if RAMINIT_SYSINFO == 1
/* Early per-core CPU init, executed by the BSP and every AP while still
 * running out of Cache-As-RAM: enables MMIO config access, optionally
 * lifts APIC IDs, marks the core started in its LAPIC mailbox, and sends
 * APs through microcode update and FID/VID setup before they park.
 * Presumably returns the BSP APIC ID -- tail of function not shown here;
 * confirm against the full source. */
282 static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
284 static u32 init_cpus(u32 cpu_init_detectedx)
289 struct node_core_id id;
292 * already set early mtrr in cache_as_ram.inc
295 /* enable access pci conf via mmio */
296 set_pci_mmio_conf_reg();
298 /* that is from initial apicid, we need nodeid and coreid
300 id = get_node_core_id_x();
302 /* NB_CFG MSR is shared between cores, so we need make sure
303 core0 is done at first --- use wait_all_core0_started */
304 if (id.coreid == 0) {
305 set_apicid_cpuid_lo(); /* only set it on core0 */
306 set_EnableCf8ExtCfg(); /* only set it on core0 */
307 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
308 enable_apic_ext_id(id.nodeid);
/* Optionally shift APIC IDs up by CONFIG_APIC_ID_OFFSET; whether the
 * BSP is also lifted depends on CONFIG_LIFT_BSP_APIC_ID. */
314 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
315 u32 initial_apicid = get_initial_apicid();
317 #if CONFIG_LIFT_BSP_APIC_ID == 0
318 if (initial_apicid != 0) // other than bsp
321 /* use initial apic id to lift it */
322 u32 dword = lapic_read(LAPIC_ID);
/* The LAPIC ID occupies bits [31:24] of the LAPIC_ID register. */
323 dword &= ~(0xff << 24);
325 (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
327 lapic_write(LAPIC_ID, dword);
329 #if CONFIG_LIFT_BSP_APIC_ID == 1
330 bsp_apicid += CONFIG_APIC_ID_OFFSET;
335 /* get the apicid, it may be lifted already */
338 // show our apicid, nodeid, and coreid
339 if (id.coreid == 0) {
340 if (id.nodeid != 0) //all core0 except bsp
341 print_apicid_nodeid_coreid(apicid, id, " core0: ");
342 } else { //all other cores
343 print_apicid_nodeid_coreid(apicid, id, " corex: ");
/* An INIT seen while we are running means something went wrong earlier;
 * log it and issue a soft reset. */
346 if (cpu_init_detectedx) {
347 print_apicid_nodeid_coreid(apicid, id,
348 "\n\n\nINIT detected from ");
349 printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
353 if (id.coreid == 0) {
354 if (!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
355 distinguish_cpu_resets(id.nodeid); // Also indicates we are started
357 // Mark the core as started.
358 lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
/* Everything below runs on APs only; the BSP takes its own path. */
360 if (apicid != bsp_apicid) {
361 /* Setup each AP's cores MSRs.
362 * This happens after HTinit.
363 * The BSP runs this code in its own path.
365 update_microcode(cpuid_eax(1));
369 #if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
370 // Run on all AP for proper FID/VID setup.
371 if (id.coreid == 0) // only need set fid for core0
374 // check warm(bios) reset to call stage2 otherwise do stage1
375 if (warm_reset_detect(id.nodeid)) {
377 "init_fidvid_stage2 apicid: %02x\n",
379 init_fidvid_stage2(apicid, id.nodeid);
382 "init_fidvid_ap(stage1) apicid: %02x\n",
384 init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
390 /* AP is ready, Wait for the BSP to get memory configured */
391 /* FIXME: many cores spinning on node0 pci register seems to be bad.
392 * Why do we need to wait? These APs are just going to go sit in a hlt.
394 //wait_till_sysinfo_in_ram();
/* Cover low RAM with a write-back MTRR before the AP halts. */
396 set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
400 "\nAP %02x should be halted but you are reading this....\n",
/* Report whether the given node's core0 has started, by testing the
 * ColdResetDetect bit in HT_INIT_CONTROL (NOTE(review): presumably set
 * by distinguish_cpu_resets() on that core -- confirm against that
 * helper). */
407 static u32 is_core0_started(u32 nodeid)
411 device = NODE_PCI(nodeid, 0);
412 htic = pci_read_config32(device, HT_INIT_CONTROL);
413 htic &= HTIC_ColdR_Detect;
/* Spin until every non-BSP node's core0 reports started. */
417 static void wait_all_core0_started(void)
419 /* When core0 is started, it will distinguish_cpu_resets,
420 * So wait for that to finish */
422 u32 nodes = get_nodes();
424 printk(BIOS_DEBUG, "core0 started: ");
425 for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
426 while (!is_core0_started(i)) {
428 printk(BIOS_DEBUG, " %02x", i);
430 printk(BIOS_DEBUG, "\n");
433 #if CONFIG_MAX_PHYSICAL_CPUS > 1
435 * void start_node(u32 node)
437 * start the core0 in node, so it can generate HT packet to feature code.
439 * This function starts the AP nodes core0s. wait_all_core0_started() in
440 * romstage.c waits for all the AP to be finished before continuing
443 static void start_node(u8 node)
447 /* Enable routing table */
448 printk(BIOS_DEBUG, "Start node %02x", node);
450 #if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
451 /* For FAM10 support, we need to set Dram base/limit for the new node */
452 pci_write_config32(NODE_MP(node), 0x44, 0)
453 pci_write_config32(NODE_MP(node), 0x40, 3);
456 /* Allow APs to make requests (ROM fetch) */
457 val = pci_read_config32(NODE_HT(node), 0x6c);
459 pci_write_config32(NODE_HT(node), 0x6c, val);
461 printk(BIOS_DEBUG, " done.\n");
465 * static void setup_remote_node(u32 node)
467 * Copy the BSP Address Map to each AP.
469 static void setup_remote_node(u8 node)
471 /* These registers can be used with F1x114_x Address Map at the
472 same time, So must set them even 32 node */
473 static const u16 pci_reg[] = {
474 /* DRAM Base/Limits Registers */
475 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
476 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
477 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
478 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
479 /* MMIO Base/Limits Registers */
480 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
481 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
482 /* IO Base/Limits Registers */
483 0xc4, 0xcc, 0xd4, 0xdc,
484 0xc0, 0xc8, 0xd0, 0xd8,
485 /* Configuration Map Registers */
486 0xe0, 0xe4, 0xe8, 0xec,
490 printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
492 /* copy the default resource map from node 0 */
493 for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
497 value = pci_read_config32(NODE_MP(0), reg);
498 pci_write_config32(NODE_MP(node), reg, value);
501 printk(BIOS_DEBUG, " done\n");
503 #endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
/* Apply AMD Fam10h erratum 281 fixups on the given node when the
 * revision/platform combination is affected (server platforms with
 * pre-B0/B1 parts, or a mixed-revision system). */
505 static void AMD_Errata281(u8 node, u32 revision, u32 platform)
507 /* Workaround for Transaction Scheduling Conflict in
508 * Northbridge Cross Bar. Implement XCS Token adjustment
509 * for ganged links. Also, perform fix up for the mixed
516 u8 nodes = get_nodes();
518 if (platform & AMD_PTYPE_SVR) {
519 /* For each node we need to check for a "broken" node */
520 if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
521 for (i = 0; i < nodes; i++) {
522 if (mctGetLogicalCPUID(i) &
523 (AMD_DR_B0 | AMD_DR_B1)) {
530 if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
532 /* F0X68[22:21] DsNpReqLmt0 = 01b */
533 val = pci_read_config32(NODE_PCI(node, 0), 0x68);
536 pci_write_config32(NODE_PCI(node, 0), 0x68, val);
/* XCS token adjustments live in function 3 of the node. */
539 val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
542 pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
545 val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
548 pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
550 /* F3X144[3:0] RspTok = 0001b */
551 val = pci_read_config32(NODE_PCI(node, 3), 0x144);
554 pci_write_config32(NODE_PCI(node, 3), 0x144, val);
/* Same adjustment for the three link registers F3X148/14C/150. */
556 for (i = 0; i < 3; i++) {
557 reg = 0x148 + (i * 4);
558 val = pci_read_config32(NODE_PCI(node, 3), reg);
561 pci_write_config32(NODE_PCI(node, 3), reg, val);
/* Apply AMD Fam10h erratum 298 (L2 eviction during Accessed/Dirty bit
 * updates): on affected B0/B1/B2 parts set the TLB workaround MSR bits;
 * on B3 parts only advertise the OS Visible Workaround as done. */
567 static void AMD_Errata298(void)
569 /* Workaround for L2 Eviction May Occur during operation to
570 * set Accessed or dirty bit.
576 u8 nodes = get_nodes();
578 /* For each core we need to check for a "broken" node */
579 for (i = 0; i < nodes; i++) {
580 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
588 msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
592 msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
/* Tell the OS the workaround has been applied in BIOS. */
595 msr = rdmsr(OSVW_ID_Length);
596 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
597 wrmsr(OSVW_ID_Length, msr);
599 msr = rdmsr(OSVW_Status);
600 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
601 wrmsr(OSVW_Status, msr);
/* B3 parts need no BIOS fixup, only the OSVW length advertisement. */
604 if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
605 msr = rdmsr(OSVW_ID_Length);
606 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
607 wrmsr(OSVW_ID_Length, msr);
/* Translate the build-time SYSTEM_TYPE into an AMD_PTYPE_* platform
 * mask used to filter the MSR/PCI/HT-phy default tables. */
612 static u32 get_platform_type(void)
616 switch (SYSTEM_TYPE) {
618 ret |= AMD_PTYPE_DSK;
621 ret |= AMD_PTYPE_MOB;
624 ret |= AMD_PTYPE_SVR;
630 /* FIXME: add UMA support. */
632 /* All Fam10 are multi core */
/* Program the PSI_L VID threshold (F3xA0[6:0]) from the VID of the
 * lowest enabled P-state, on mobile/desktop platforms only. */
638 static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
644 if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
646 /* The following code sets the PSIVID to the lowest support P state
647 * assuming that the VID for the lowest power state is below
648 * the VDD voltage regulator threshold. (This also assumes that there
649 * is a Pstate lower than P0)
/* Scan P-states from P4 down, looking for the first enabled one. */
652 for (i = 4; i >= 0; i--) {
653 msr = rdmsr(PS_REG_BASE + i);
655 if (msr.hi & PS_EN_MASK) {
/* NOTE(review): NODE_PCI(i, 3) uses the P-state index i where the
 * node parameter would be expected -- looks suspicious; confirm
 * against the full source and the BKDG before changing. */
656 dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
/* CpuVid is msr.lo[15:9]; move it into F3xA0[6:0]. */
658 dword |= (msr.lo >> 9) & 0x7F;
659 pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
667 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
668 * HT Phy operations are not valid on links that aren't present, so this
669 * prevents invalid accesses.
671 * Returns the offset of the link register.
/* Walks the node's function-0 capability chain, counting down cap_count
 * HT Host capability blocks; the matching block's offset is returned
 * through *offset. */
673 static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
678 /* get start of CPU HT Host Capabilities */
679 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
680 val &= 0xFF; //reg offset of first link
684 /* Traverse through the capabilities. */
686 reg = pci_read_config32(NODE_PCI(node, 0), val);
687 /* Is the capability block a HyperTransport capability block? */
688 if ((reg & 0xFF) == 0x08) {
689 /* Is the HT capability block an HT Host Capability? */
690 if ((reg & 0xE0000000) == (1 << 29))
/* Next-capability pointer is in bits [15:8]. */
695 val = (reg >> 8) & 0xFF; //update reg offset
696 } while (cap_count && val);
700 /* If requested capability found val != 0 */
708 * AMD_checkLinkType - Compare desired link characteristics using a logical
711 * Returns the link characteristic mask.
/* Builds an HTPHY_LINKTYPE_* mask for one link of a node: coherent vs
 * noncoherent, HT3 vs HT1 (from the link frequency field), and ganged
 * vs unganged (from the F0x170+ link extension registers). */
713 static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
718 /* Check connect, init and coherency */
719 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
723 linktype |= HTPHY_LINKTYPE_COHERENT;
726 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
/* Link frequency field [11:8]; values above 6 indicate HT3 speeds. */
730 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
732 if (((val >> 8) & 0x0F) > 6)
733 linktype |= HTPHY_LINKTYPE_HT3;
735 linktype |= HTPHY_LINKTYPE_HT1;
738 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
741 linktype |= HTPHY_LINKTYPE_GANGED;
743 linktype |= HTPHY_LINKTYPE_UNGANGED;
749 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
750 * a phy setting for that link.
/* Performs a read-modify-write of one HT phy register through the
 * function-4 portal: read via the portal, apply the table entry's
 * mask/data, then write back and wait for completion. */
752 static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
758 /* Determine this link's portal */
/* Portal control registers start at 0x180, spaced 8 bytes per link. */
762 phyBase = ((u32) link << 3) | 0x180;
764 /* Get the portal control register's initial value
765 * and update it to access the desired phy register
767 phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
/* Registers above 0x1FF need the direct-map addressing mode. */
769 if (fam10_htphy_default[entry].htreg > 0x1FF) {
770 phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
771 phyReg |= HTPHY_DIRECT_MAP;
773 phyReg &= ~HTPHY_OFFSET_MASK;
776 /* Now get the current phy register data
777 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
779 phyReg |= fam10_htphy_default[entry].htreg;
780 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Spin until the portal signals the read is complete. */
783 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
784 } while (!(val & HTPHY_IS_COMPLETE_MASK));
786 /* Now we have the phy register data, apply the change */
787 val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
788 val &= ~fam10_htphy_default[entry].mask;
789 val |= fam10_htphy_default[entry].data;
790 pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
792 /* write it through the portal to the phy
793 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
795 phyReg |= HTPHY_WRITE_CMD;
796 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Spin until the portal signals the write is complete. */
799 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
800 } while (!(val & HTPHY_IS_COMPLETE_MASK));
/* Apply the fam10_msr_default table to the current core: for every
 * entry whose revision and platform masks match, read-modify-write the
 * listed MSR. */
803 void cpuSetAMDMSR(void)
805 /* This routine loads the CPU with default settings in fam10_msr_default
806 * table . It must be run after Cache-As-RAM has been enabled, and
807 * Hypertransport initialization has taken place. Also note
808 * that it is run on the current processor only, and only for the current
813 u32 revision, platform;
815 printk(BIOS_DEBUG, "cpuSetAMDMSR ");
/* 0xFF = query the revision mask of the current (any) node. */
817 revision = mctGetLogicalCPUID(0xFF);
818 platform = get_platform_type();
820 for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
821 if ((fam10_msr_default[i].revision & revision) &&
822 (fam10_msr_default[i].platform & platform)) {
823 msr = rdmsr(fam10_msr_default[i].msr);
824 msr.hi &= ~fam10_msr_default[i].mask_hi;
825 msr.hi |= fam10_msr_default[i].data_hi;
826 msr.lo &= ~fam10_msr_default[i].mask_lo;
827 msr.lo |= fam10_msr_default[i].data_lo;
828 wrmsr(fam10_msr_default[i].msr, msr);
833 printk(BIOS_DEBUG, " done\n");
/* Apply the fam10_pci_default and fam10_htphy_default tables to one
 * node, plus the non-table-driven PSIVID setup and erratum 281. */
836 static void cpuSetAMDPCI(u8 node)
838 /* This routine loads the CPU with default settings in fam10_pci_default
839 * table . It must be run after Cache-As-RAM has been enabled, and
840 * Hypertransport initialization has taken place. Also note
841 * that it is run for the first core on each node
844 u32 revision, platform;
848 printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
850 revision = mctGetLogicalCPUID(node);
851 platform = get_platform_type();
853 AMD_SetupPSIVID_d(platform, node); /* Set PSIVID offset which is not table driven */
/* Table-driven read-modify-write of node PCI config registers. */
855 for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
856 if ((fam10_pci_default[i].revision & revision) &&
857 (fam10_pci_default[i].platform & platform)) {
858 val = pci_read_config32(NODE_PCI(node,
859 fam10_pci_default[i].
861 fam10_pci_default[i].offset);
862 val &= ~fam10_pci_default[i].mask;
863 val |= fam10_pci_default[i].data;
864 pci_write_config32(NODE_PCI(node,
865 fam10_pci_default[i].
867 fam10_pci_default[i].offset, val);
/* Table-driven HT phy register writes, one pass per present link. */
871 for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
872 if ((fam10_htphy_default[i].revision & revision) &&
873 (fam10_htphy_default[i].platform & platform)) {
874 /* HT Phy settings either apply to both sublinks or have
875 * separate registers for sublink zero and one, so there
876 * will be two table entries. So, here we only loop
877 * through the sublink zeros in function zero.
879 for (j = 0; j < 4; j++) {
880 if (AMD_CpuFindCapability(node, j, &offset)) {
881 if (AMD_checkLinkType(node, j, offset)
882 & fam10_htphy_default[i].linktype) {
883 AMD_SetHtPhyRegister(node, j,
887 /* No more capabilities,
896 /* FIXME: add UMA support and programXbarToSriReg(); */
898 AMD_Errata281(node, revision, platform);
900 /* FIXME: if the dct phy doesn't init correct it needs to reset.
901 if (revision & (AMD_DR_B2 | AMD_DR_B3))
904 printk(BIOS_DEBUG, " done\n");
/* Clear the Machine Check status registers on the current core; only
 * meaningful on a cold boot, when they power up with unknown contents. */
907 static void cpuInitializeMCA(void)
909 /* Clears Machine Check Architecture (MCA) registers, which power on
910 * containing unknown data, on currently running processor.
911 * This routine should only be executed on initial power on (cold boot),
912 * not across a warm reset because valid data is present at that time.
919 if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
920 msr = rdmsr(MCG_CAP);
921 if (msr.lo & MCG_CTL_P) { /* MCG_CTL_P bit is set? */
/* MCG_CAP[7:0] = bank count; each bank has 4 MSRs, so scale by 4. */
924 msr.lo <<= 2; /* multiply the count by 4 */
925 reg = MC0_STA + msr.lo;
927 for (i = 0; i < 4; i++) {
929 reg -= 4; /* Touch status regs for each bank */
936 * finalize_node_setup()
938 * Do any additional post HT init
941 static void finalize_node_setup(struct sys_info *sysinfo)
944 u8 nodes = get_nodes();
947 #if RAMINIT_SYSINFO == 1
948 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
949 reg = pci_read_config32(NODE_HT(0), 0x64);
950 sysinfo->sblk = (reg >> 8) & 7;
952 sysinfo->nodes = nodes;
953 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
956 for (i = 0; i < nodes; i++) {
961 // Prep each node for FID/VID setup.
965 #if CONFIG_MAX_PHYSICAL_CPUS > 1
966 /* Skip the BSP, start at node 1 */
967 for (i = 1; i < nodes; i++) {
968 setup_remote_node(i);