/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
22 #include <cpu/x86/lapic.h>
23 #include <cpu/x86/mtrr.h>
24 #include <northbridge/amd/amdfam10/amdfam10.h>
25 #include <northbridge/amd/amdht/AsPsDefs.h>
26 #include <northbridge/amd/amdht/porting.h>
28 //it takes the CONFIG_ENABLE_APIC_EXT_ID and CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID
33 #ifndef SET_FIDVID_CORE0_ONLY
34 /* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
35 Need to do every AP to set common FID/VID */
36 #define SET_FIDVID_CORE0_ONLY 0
39 static void prep_fid_change(void);
40 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
41 void cpuSetAMDMSR(void);
#if CONFIG_PCI_IO_CFG_EXT == 1
/* Set NB_CFG[46] (EnableCf8ExtCfg) so the extended CF8/CFC I/O mechanism
 * can address the CONFIG_PCI_IO_CFG_EXT 4K register range. Must run on
 * core0 of each node (NB_CFG is shared between cores).
 */
static void set_EnableCf8ExtCfg(void)
{
	// set the NB_CFG[46]=1;
	msr_t msr;
	msr = rdmsr(NB_CFG_MSR);
	// EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
	msr.hi |= (1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);
}
#else
static void set_EnableCf8ExtCfg(void) { }
#endif
#define PCI_MMIO_BASE 0xfe000000
/* because we will use gs to store hi, so need to make sure lo can start
   from 0, So PCI_MMIO_BASE & 0x00ffffff should be equal to 0 */

/* Program the MMIO Configuration Base Address MSR (0xc0010058) so PCI
 * config space can be reached through MMCONF, and stash the MMIO base in
 * GS_Base for later segment-relative accesses.
 */
static void set_pci_mmio_conf_reg(void)
{
#if CONFIG_MMCONF_SUPPORT
	msr_t msr;
	msr = rdmsr(0xc0010058);
	msr.lo &= ~(0xfff00000 | (0xf << 2));
	// 256 bus per segment, MMIO reg will be 4G , enable MMIO Config space
	msr.lo |= ((8 + CONFIG_PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
	msr.hi &= ~(0x0000ffff);
	msr.hi |= (PCI_MMIO_BASE >> (32 - 8));
	wrmsr(0xc0010058, msr);	// MMIO Config Base Address Reg

	//mtrr for that range?
	// set_var_mtrr_x(7, PCI_MMIO_BASE<<8, PCI_MMIO_BASE>>(32-8), 0x00000000, 0x01, MTRR_TYPE_UNCACHEABLE);

	/* NOTE(review): upstream disables 32-bit address wrapping here via
	 * set_wrap32dis() before loading GS_Base — verify. */
	set_wrap32dis();

	msr.hi = (PCI_MMIO_BASE >> (32 - 8));
	msr.lo = 0;
	wrmsr(0xc0000101, msr);	//GS_Base Reg
#endif
}
86 typedef void (*process_ap_t) (u32 apicid, void *gp);
88 //core_range = 0 : all cores
89 //core range = 1 : core 0 only
90 //core range = 2 : cores other than core0
92 static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
95 // here assume the OS don't change our apicid
100 u32 disable_siblings;
104 u32 ApicIdCoreIdSize;
106 /* get_nodes define in ht_wrapper.c */
109 disable_siblings = !CONFIG_LOGICAL_CPUS;
111 #if CONFIG_LOGICAL_CPUS == 1 && CONFIG_HAVE_OPTION_TABLE == 1
112 if (read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 mean multi core
113 disable_siblings = 1;
117 /* Assume that all node are same stepping, otherwise we can use use
118 nb_cfg_54 from bsp for all nodes */
119 nb_cfg_54 = read_nb_cfg_54();
121 ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
122 if (ApicIdCoreIdSize) {
123 siblings = ((1 << ApicIdCoreIdSize) - 1);
125 siblings = 3; //quad core
128 for (i = 0; i < nodes; i++) {
129 cores_found = get_core_num_in_bsp(i);
133 if (core_range == 2) {
139 if (disable_siblings || (core_range == 1)) {
145 for (j = jstart; j <= jend; j++) {
147 i * (nb_cfg_54 ? (siblings + 1) : 1) +
148 j * (nb_cfg_54 ? 1 : 64);
150 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
151 #if CONFIG_LIFT_BSP_APIC_ID == 0
152 if ((i != 0) || (j != 0)) /* except bsp */
154 ap_apicid += CONFIG_APIC_ID_OFFSET;
157 if (ap_apicid == bsp_apicid)
160 process_ap(ap_apicid, gp);
166 static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
171 lapic_wait_icr_idle();
172 lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
173 lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
175 /* Extra busy check compared to lapic.h */
178 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
179 } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
183 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
184 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
188 if (status == LAPIC_ICR_RR_VALID) {
189 *pvalue = lapic_read(LAPIC_RRR);
195 /* Use the LAPIC timer count register to hold each cores init status */
196 #define LAPIC_MSG_REG 0x380
199 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
202 static inline __attribute__ ((always_inline))
203 void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
207 "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
208 apicid, id.nodeid, id.coreid);
211 static u32 wait_cpu_state(u32 apicid, u32 state)
217 if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
219 if ((readback & 0x3f) == state) {
221 break; //target cpu is in stage started
233 static void wait_ap_started(u32 ap_apicid, void *gp)
236 timeout = wait_cpu_state(ap_apicid, 0x13); // started
237 printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
239 printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
241 printk(BIOS_DEBUG, "started\n");
245 static void wait_all_other_cores_started(u32 bsp_apicid)
247 // all aps other than core0
248 printk(BIOS_DEBUG, "started ap apicid: ");
249 for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
250 printk(BIOS_DEBUG, "\n");
253 static void allow_all_aps_stop(u32 bsp_apicid)
255 /* Called by the BSP to indicate AP can stop */
257 /* FIXME Do APs use this?
258 Looks like wait_till_sysinfo_in_ram is used instead. */
260 // allow aps to stop use 6 bits for state
261 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
264 static void enable_apic_ext_id(u32 node)
268 val = pci_read_config32(NODE_HT(node), 0x68);
269 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
270 pci_write_config32(NODE_HT(node), 0x68, val);
273 static void STOP_CAR_AND_CPU(void)
277 /* Disable L2 IC to L3 connection (Only for CAR) */
278 msr = rdmsr(BU_CFG2);
279 msr.lo &= ~(1 << ClLinesToNbDis);
282 disable_cache_as_ram(); // inline
283 /* stop all cores except node0/core0 the bsp .... */
287 #if RAMINIT_SYSINFO == 1
288 static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
290 static u32 init_cpus(u32 cpu_init_detectedx)
295 struct node_core_id id;
298 * already set early mtrr in cache_as_ram.inc
301 /* enable access pci conf via mmio */
302 set_pci_mmio_conf_reg();
304 /* that is from initial apicid, we need nodeid and coreid
306 id = get_node_core_id_x();
308 /* NB_CFG MSR is shared between cores, so we need make sure
309 core0 is done at first --- use wait_all_core0_started */
310 if (id.coreid == 0) {
311 set_apicid_cpuid_lo(); /* only set it on core0 */
312 set_EnableCf8ExtCfg(); /* only set it on core0 */
313 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
314 enable_apic_ext_id(id.nodeid);
320 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
321 u32 initial_apicid = get_initial_apicid();
323 #if CONFIG_LIFT_BSP_APIC_ID == 0
324 if (initial_apicid != 0) // other than bsp
327 /* use initial apic id to lift it */
328 u32 dword = lapic_read(LAPIC_ID);
329 dword &= ~(0xff << 24);
331 (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
333 lapic_write(LAPIC_ID, dword);
335 #if CONFIG_LIFT_BSP_APIC_ID == 1
336 bsp_apicid += CONFIG_APIC_ID_OFFSET;
341 /* get the apicid, it may be lifted already */
344 // show our apicid, nodeid, and coreid
345 if (id.coreid == 0) {
346 if (id.nodeid != 0) //all core0 except bsp
347 print_apicid_nodeid_coreid(apicid, id, " core0: ");
348 } else { //all other cores
349 print_apicid_nodeid_coreid(apicid, id, " corex: ");
352 if (cpu_init_detectedx) {
353 print_apicid_nodeid_coreid(apicid, id,
354 "\n\n\nINIT detected from ");
355 printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
359 if (id.coreid == 0) {
360 if (!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
361 distinguish_cpu_resets(id.nodeid); // Also indicates we are started
363 // Mark the core as started.
364 lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
366 if (apicid != bsp_apicid) {
367 /* Setup each AP's cores MSRs.
368 * This happens after HTinit.
369 * The BSP runs this code in it's own path.
371 update_microcode(cpuid_eax(1));
375 #if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
376 // Run on all AP for proper FID/VID setup.
377 if (id.coreid == 0) // only need set fid for core0
380 // check warm(bios) reset to call stage2 otherwise do stage1
381 if (warm_reset_detect(id.nodeid)) {
383 "init_fidvid_stage2 apicid: %02x\n",
385 init_fidvid_stage2(apicid, id.nodeid);
388 "init_fidvid_ap(stage1) apicid: %02x\n",
390 init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
396 /* AP is ready, Wait for the BSP to get memory configured */
397 /* FIXME: many cores spinning on node0 pci register seems to be bad.
398 * Why do we need to wait? These APs are just going to go sit in a hlt.
400 //wait_till_sysinfo_in_ram();
402 set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
406 "\nAP %02x should be halted but you are reading this....\n",
413 static u32 is_core0_started(u32 nodeid)
417 device = NODE_PCI(nodeid, 0);
418 htic = pci_read_config32(device, HT_INIT_CONTROL);
419 htic &= HTIC_ColdR_Detect;
423 static void wait_all_core0_started(void)
425 /* When core0 is started, it will distingush_cpu_resets
426 * So wait for that to finish */
428 u32 nodes = get_nodes();
430 printk(BIOS_DEBUG, "core0 started: ");
431 for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
432 while (!is_core0_started(i)) {
434 printk(BIOS_DEBUG, " %02x", i);
436 printk(BIOS_DEBUG, "\n");
439 #if CONFIG_MAX_PHYSICAL_CPUS > 1
441 * void start_node(u32 node)
443 * start the core0 in node, so it can generate HT packet to feature code.
445 * This function starts the AP nodes core0s. wait_all_core0_started() in
446 * romstage.c waits for all the AP to be finished before continuing
449 static void start_node(u8 node)
453 /* Enable routing table */
454 printk(BIOS_DEBUG, "Start node %02x", node);
456 #if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
457 /* For FAM10 support, we need to set Dram base/limit for the new node */
458 pci_write_config32(NODE_MP(node), 0x44, 0);
459 pci_write_config32(NODE_MP(node), 0x40, 3);
462 /* Allow APs to make requests (ROM fetch) */
463 val = pci_read_config32(NODE_HT(node), 0x6c);
465 pci_write_config32(NODE_HT(node), 0x6c, val);
467 printk(BIOS_DEBUG, " done.\n");
471 * static void setup_remote_node(u32 node)
473 * Copy the BSP Adress Map to each AP.
475 static void setup_remote_node(u8 node)
477 /* There registers can be used with F1x114_x Address Map at the
478 same time, So must set them even 32 node */
479 static const u16 pci_reg[] = {
480 /* DRAM Base/Limits Registers */
481 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
482 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
483 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
484 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
485 /* MMIO Base/Limits Registers */
486 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
487 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
488 /* IO Base/Limits Registers */
489 0xc4, 0xcc, 0xd4, 0xdc,
490 0xc0, 0xc8, 0xd0, 0xd8,
491 /* Configuration Map Registers */
492 0xe0, 0xe4, 0xe8, 0xec,
496 printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
498 /* copy the default resource map from node 0 */
499 for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
503 value = pci_read_config32(NODE_MP(0), reg);
504 pci_write_config32(NODE_MP(node), reg, value);
507 printk(BIOS_DEBUG, " done\n");
509 #endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
511 static void AMD_Errata281(u8 node, u32 revision, u32 platform)
513 /* Workaround for Transaction Scheduling Conflict in
514 * Northbridge Cross Bar. Implement XCS Token adjustment
515 * for ganged links. Also, perform fix up for the mixed
522 u8 nodes = get_nodes();
524 if (platform & AMD_PTYPE_SVR) {
525 /* For each node we need to check for a "broken" node */
526 if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
527 for (i = 0; i < nodes; i++) {
528 if (mctGetLogicalCPUID(i) &
529 (AMD_DR_B0 | AMD_DR_B1)) {
536 if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
538 /* F0X68[22:21] DsNpReqLmt0 = 01b */
539 val = pci_read_config32(NODE_PCI(node, 0), 0x68);
542 pci_write_config32(NODE_PCI(node, 0), 0x68, val);
545 val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
548 pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
551 val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
554 pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
556 /* F3X144[3:0] RspTok = 0001b */
557 val = pci_read_config32(NODE_PCI(node, 3), 0x144);
560 pci_write_config32(NODE_PCI(node, 3), 0x144, val);
562 for (i = 0; i < 3; i++) {
563 reg = 0x148 + (i * 4);
564 val = pci_read_config32(NODE_PCI(node, 3), reg);
567 pci_write_config32(NODE_PCI(node, 3), reg, val);
573 static void AMD_Errata298(void)
575 /* Workaround for L2 Eviction May Occur during operation to
576 * set Accessed or dirty bit.
582 u8 nodes = get_nodes();
584 /* For each core we need to check for a "broken" node */
585 for (i = 0; i < nodes; i++) {
586 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
594 msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
598 msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
601 msr = rdmsr(OSVW_ID_Length);
602 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
603 wrmsr(OSVW_ID_Length, msr);
605 msr = rdmsr(OSVW_Status);
606 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
607 wrmsr(OSVW_Status, msr);
610 if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
611 msr = rdmsr(OSVW_ID_Length);
612 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
613 wrmsr(OSVW_ID_Length, msr);
618 static u32 get_platform_type(void)
622 switch (SYSTEM_TYPE) {
624 ret |= AMD_PTYPE_DSK;
627 ret |= AMD_PTYPE_MOB;
630 ret |= AMD_PTYPE_SVR;
636 /* FIXME: add UMA support. */
638 /* All Fam10 are multi core */
644 static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
650 if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
652 /* The following code sets the PSIVID to the lowest support P state
653 * assuming that the VID for the lowest power state is below
654 * the VDD voltage regulator threshold. (This also assumes that there
655 * is a Pstate lower than P0)
658 for (i = 4; i >= 0; i--) {
659 msr = rdmsr(PS_REG_BASE + i);
661 if (msr.hi & PS_EN_MASK) {
662 dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
664 dword |= (msr.lo >> 9) & 0x7F;
665 pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
673 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
674 * HT Phy operations are not valid on links that aren't present, so this
675 * prevents invalid accesses.
677 * Returns the offset of the link register.
679 static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
684 /* get start of CPU HT Host Capabilities */
685 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
686 val &= 0xFF; //reg offset of first link
690 /* Traverse through the capabilities. */
692 reg = pci_read_config32(NODE_PCI(node, 0), val);
693 /* Is the capability block a HyperTransport capability block? */
694 if ((reg & 0xFF) == 0x08) {
695 /* Is the HT capability block an HT Host Capability? */
696 if ((reg & 0xE0000000) == (1 << 29))
701 val = (reg >> 8) & 0xFF; //update reg offset
702 } while (cap_count && val);
706 /* If requested capability found val != 0 */
714 * AMD_checkLinkType - Compare desired link characteristics using a logical
717 * Returns the link characteristic mask.
719 static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
724 /* Check connect, init and coherency */
725 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
729 linktype |= HTPHY_LINKTYPE_COHERENT;
732 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
736 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
738 if (((val >> 8) & 0x0F) > 6)
739 linktype |= HTPHY_LINKTYPE_HT3;
741 linktype |= HTPHY_LINKTYPE_HT1;
744 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
747 linktype |= HTPHY_LINKTYPE_GANGED;
749 linktype |= HTPHY_LINKTYPE_UNGANGED;
755 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
756 * a phy setting for that link.
758 static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
764 /* Determine this link's portal */
768 phyBase = ((u32) link << 3) | 0x180;
770 /* Get the portal control register's initial value
771 * and update it to access the desired phy register
773 phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
775 if (fam10_htphy_default[entry].htreg > 0x1FF) {
776 phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
777 phyReg |= HTPHY_DIRECT_MAP;
779 phyReg &= ~HTPHY_OFFSET_MASK;
782 /* Now get the current phy register data
783 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
785 phyReg |= fam10_htphy_default[entry].htreg;
786 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
789 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
790 } while (!(val & HTPHY_IS_COMPLETE_MASK));
792 /* Now we have the phy register data, apply the change */
793 val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
794 val &= ~fam10_htphy_default[entry].mask;
795 val |= fam10_htphy_default[entry].data;
796 pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
798 /* write it through the portal to the phy
799 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
801 phyReg |= HTPHY_WRITE_CMD;
802 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
805 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
806 } while (!(val & HTPHY_IS_COMPLETE_MASK));
809 void cpuSetAMDMSR(void)
811 /* This routine loads the CPU with default settings in fam10_msr_default
812 * table . It must be run after Cache-As-RAM has been enabled, and
813 * Hypertransport initialization has taken place. Also note
814 * that it is run on the current processor only, and only for the current
819 u32 revision, platform;
821 printk(BIOS_DEBUG, "cpuSetAMDMSR ");
823 revision = mctGetLogicalCPUID(0xFF);
824 platform = get_platform_type();
826 for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
827 if ((fam10_msr_default[i].revision & revision) &&
828 (fam10_msr_default[i].platform & platform)) {
829 msr = rdmsr(fam10_msr_default[i].msr);
830 msr.hi &= ~fam10_msr_default[i].mask_hi;
831 msr.hi |= fam10_msr_default[i].data_hi;
832 msr.lo &= ~fam10_msr_default[i].mask_lo;
833 msr.lo |= fam10_msr_default[i].data_lo;
834 wrmsr(fam10_msr_default[i].msr, msr);
839 printk(BIOS_DEBUG, " done\n");
842 static void cpuSetAMDPCI(u8 node)
844 /* This routine loads the CPU with default settings in fam10_pci_default
845 * table . It must be run after Cache-As-RAM has been enabled, and
846 * Hypertransport initialization has taken place. Also note
847 * that it is run for the first core on each node
850 u32 revision, platform;
854 printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
856 revision = mctGetLogicalCPUID(node);
857 platform = get_platform_type();
859 AMD_SetupPSIVID_d(platform, node); /* Set PSIVID offset which is not table driven */
861 for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
862 if ((fam10_pci_default[i].revision & revision) &&
863 (fam10_pci_default[i].platform & platform)) {
864 val = pci_read_config32(NODE_PCI(node,
865 fam10_pci_default[i].
867 fam10_pci_default[i].offset);
868 val &= ~fam10_pci_default[i].mask;
869 val |= fam10_pci_default[i].data;
870 pci_write_config32(NODE_PCI(node,
871 fam10_pci_default[i].
873 fam10_pci_default[i].offset, val);
877 for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
878 if ((fam10_htphy_default[i].revision & revision) &&
879 (fam10_htphy_default[i].platform & platform)) {
880 /* HT Phy settings either apply to both sublinks or have
881 * separate registers for sublink zero and one, so there
882 * will be two table entries. So, here we only loop
883 cd t * through the sublink zeros in function zero.
885 for (j = 0; j < 4; j++) {
886 if (AMD_CpuFindCapability(node, j, &offset)) {
887 if (AMD_checkLinkType(node, j, offset)
888 & fam10_htphy_default[i].linktype) {
889 AMD_SetHtPhyRegister(node, j,
893 /* No more capabilities,
902 /* FIXME: add UMA support and programXbarToSriReg(); */
904 AMD_Errata281(node, revision, platform);
906 /* FIXME: if the dct phy doesn't init correct it needs to reset.
907 if (revision & (AMD_DR_B2 | AMD_DR_B3))
910 printk(BIOS_DEBUG, " done\n");
913 static void cpuInitializeMCA(void)
915 /* Clears Machine Check Architecture (MCA) registers, which power on
916 * containing unknown data, on currently running processor.
917 * This routine should only be executed on initial power on (cold boot),
918 * not across a warm reset because valid data is present at that time.
925 if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
926 msr = rdmsr(MCG_CAP);
927 if (msr.lo & MCG_CTL_P) { /* MCG_CTL_P bit is set? */
930 msr.lo <<= 2; /* multiply the count by 4 */
931 reg = MC0_STA + msr.lo;
933 for (i = 0; i < 4; i++) {
935 reg -= 4; /* Touch status regs for each bank */
942 * finalize_node_setup()
944 * Do any additional post HT init
947 static void finalize_node_setup(struct sys_info *sysinfo)
950 u8 nodes = get_nodes();
953 #if RAMINIT_SYSINFO == 1
954 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
955 reg = pci_read_config32(NODE_HT(0), 0x64);
956 sysinfo->sblk = (reg >> 8) & 7;
958 sysinfo->nodes = nodes;
959 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
962 for (i = 0; i < nodes; i++) {
967 // Prep each node for FID/VID setup.
971 #if CONFIG_MAX_PHYSICAL_CPUS > 1
972 /* Skip the BSP, start at node 1 */
973 for (i = 1; i < nodes; i++) {
974 setup_remote_node(i);