2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 //it takes the ENABLE_APIC_EXT_ID and APIC_ID_OFFSET and LIFT_BSP_APIC_ID
/* Default build-time policy: perform FID/VID (frequency/voltage) programming.
 * NOTE(review): the matching #endif lines are missing from this listing. */
23 #ifndef FAM10_SET_FIDVID
24 #define FAM10_SET_FIDVID 1
27 #ifndef FAM10_SET_FIDVID_CORE0_ONLY
28 /* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
29 Need to do every AP to set common FID/VID*/
30 #define FAM10_SET_FIDVID_CORE0_ONLY 0
/* Print a label followed by an 8-bit value as two hex digits plus newline.
 * NOTE(review): brace lines are missing from this listing. */
33 static inline void print_initcpu8 (const char *strval, u8 val)
35 printk_debug("%s%02x\n", strval, val);
/* Same as print_initcpu8 but without the trailing newline ("no carriage return"). */
38 static inline void print_initcpu8_nocr (const char *strval, u8 val)
40 printk_debug("%s%02x", strval, val);
/* Print a label followed by a 16-bit value as four hex digits plus newline. */
44 static inline void print_initcpu16 (const char *strval, u16 val)
46 printk_debug("%s%04x\n", strval, val);
/* Print a label followed by a 32-bit value as eight hex digits plus newline. */
50 static inline void print_initcpu(const char *strval, u32 val)
52 printk_debug("%s%08x\n", strval, val);
/* Forward declarations for routines defined later in this file / elsewhere. */
56 static void prep_fid_change(void);
57 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
58 void cpuSetAMDMSR(void);
/* Enable extended CF8 config access (NB_CFG MSR bit 46) so the 4K
 * PCI_IO_CFG_EXT register range is reachable; no-op stub otherwise.
 * NOTE(review): the msr declaration and #else/#endif lines are missing
 * from this listing. */
60 #if PCI_IO_CFG_EXT == 1
61 static inline void set_EnableCf8ExtCfg(void)
63 // set the NB_CFG[46]=1;
65 msr = rdmsr(NB_CFG_MSR);
66 // EnableCf8ExtCfg: We need that to access PCI_IO_CFG_EXT 4K range
67 msr.hi |= (1<<(46-32));  /* bit 46 lives in the high dword */
68 wrmsr(NB_CFG_MSR, msr);
71 static inline void set_EnableCf8ExtCfg(void) { }
76 #define PCI_MMIO_BASE 0xfe000000
77 /* because we will use gs to store hi, so need to make sure lo can start
78 from 0, So PCI_MMIO_BASE & 0x00ffffff should be equal to 0*/
/* Program MSR C001_0058 (MMIO Configuration Base Address) to enable MMCONFIG
 * PCI config access at PCI_MMIO_BASE, then stash the base in GS_Base.
 * NOTE(review): several interior lines (msr declaration, msr.lo setup for
 * the GS write) are missing from this listing. */
80 static inline void set_pci_mmio_conf_reg(void)
84 msr = rdmsr(0xc0010058);
85 msr.lo &= ~(0xfff00000 | (0xf << 2));  /* clear base[31:20] and BusRange */
86 // 256 bus per segment, MMIO reg will be 4G , enable MMIO Config space
87 msr.lo |= ((8+PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
88 msr.hi &= ~(0x0000ffff);
89 msr.hi |= (PCI_MMIO_BASE >> (32-8));
90 wrmsr(0xc0010058, msr); // MMIO Config Base Address Reg
92 //mtrr for that range?
93 // set_var_mtrr_x(7, PCI_MMIO_BASE<<8, PCI_MMIO_BASE>>(32-8), 0x00000000, 0x01, MTRR_TYPE_UNCACHEABLE);
97 msr.hi = (PCI_MMIO_BASE >> (32-8));
99 wrmsr(0xc0000101, msr); //GS_Base Reg
/* Callback type invoked once per AP by for_each_ap(); gp is caller context. */
107 typedef void (*process_ap_t)(u32 apicid, void *gp);
109 //core_range = 0 : all cores
110 //core range = 1 : core 0 only
111 //core range = 2 : cores other than core0
/* Iterate over application processors (selected by core_range, see the
 * table above) and invoke process_ap(apicid, gp) for each, skipping the BSP.
 * NOTE(review): many interior lines (local declarations, nodes/get_nodes,
 * jstart/jend computation, #endif lines) are missing from this listing. */
113 static void for_each_ap(u32 bsp_apicid, u32 core_range,
114 process_ap_t process_ap, void *gp)
116 // here assume the OS don't change our apicid
121 u32 disable_siblings;
125 u32 ApicIdCoreIdSize;
127 /* get_nodes define in ht_wrapper.c */
130 disable_siblings = !CONFIG_LOGICAL_CPUS;
132 #if CONFIG_LOGICAL_CPUS == 1
133 if(read_option(CMOS_VSTART_quad_core, CMOS_VLEN_quad_core, 0) != 0) { // 0 mean quad core
134 disable_siblings = 1;
138 /* Assume that all node are same stepping, otherwise we can use
139 nb_cfg_54 from bsp for all nodes */
140 nb_cfg_54 = read_nb_cfg_54();
/* CPUID 8000_0008 ECX[15:12] = ApicIdCoreIdSize; derives max sibling mask */
142 ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
143 if(ApicIdCoreIdSize) {
144 siblings = ((1 << ApicIdCoreIdSize) - 1);
146 siblings = 3; //quad core
149 for (i = 0; i < nodes; i++) {
150 cores_found = get_core_num_in_bsp(i);
154 if (core_range == 2) {
160 if (disable_siblings || (core_range==1)) {
167 for (j = jstart; j <= jend; j++) {
/* APIC ID layout depends on NB_CFG[54]: packed per node vs. node*64 + core */
168 ap_apicid = i * (nb_cfg_54 ? (siblings + 1):1) + j * (nb_cfg_54 ? 1:64);
170 #if (ENABLE_APIC_EXT_ID == 1) && (APIC_ID_OFFSET > 0)
171 #if LIFT_BSP_APIC_ID == 0
172 if( (i != 0) || (j != 0)) /* except bsp */
174 ap_apicid += APIC_ID_OFFSET;
177 if(ap_apicid == bsp_apicid) continue;
179 process_ap(ap_apicid, gp);
185 /* FIXME: Duplicate of what is in lapic.h? */
/* Read a remote core's LAPIC register via the REMRD IPI mechanism.
 * On success stores the value in *pvalue. Both waits are bounded at
 * ~1000 polls to avoid hanging on a dead target.
 * NOTE(review): the timeout/status declarations, do-lines, and the return
 * statements are missing from this listing. */
186 static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
191 lapic_wait_icr_idle();
192 lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
193 lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
/* wait for the IPI to be dispatched */
197 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
198 } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
/* wait for the remote read to complete */
202 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
203 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
207 if (status == LAPIC_ICR_RR_VALID) {
208 *pvalue = lapic_read(LAPIC_RRR);
215 /* Use the LAPIC timer count register to hold each cores init status */
216 #define LAPIC_MSG_REG 0x380
219 #if FAM10_SET_FIDVID == 1
220 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
/* Debug helper: dump a core's APIC/node/core IDs with a caller-supplied tag. */
223 static inline __attribute__((always_inline)) void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id, const char *str)
225 printk_debug("%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str, apicid, id.nodeid, id.coreid);
/* Poll the target core's LAPIC_MSG_REG (via remote read) until its low 6
 * bits equal the requested state. NOTE(review): the loop header, timeout
 * counter, and return statement are missing from this listing. */
229 static unsigned wait_cpu_state(u32 apicid, u32 state)
235 if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0) continue;
236 if ((readback & 0x3f) == state) {
238 break; //target cpu is in stage started
/* for_each_ap callback: block until the AP reports state 0x13 ("started"),
 * logging a diagnostic if the wait timed out. */
251 static void wait_ap_started(u32 ap_apicid, void *gp )
254 timeout = wait_cpu_state(ap_apicid, 0x13); // started
256 print_initcpu8_nocr("* AP ", ap_apicid);
257 print_initcpu(" didn't start timeout:", timeout);
260 print_initcpu8_nocr("AP started: ", ap_apicid);
/* BSP: wait for every non-core0 AP (core_range 2) to report started. */
265 static void wait_all_other_cores_started(u32 bsp_apicid)
267 // all aps other than core0
268 print_debug("started ap apicid: ")
269 for_each_ap(bsp_apicid, 2 , wait_ap_started, (void *)0);
274 static void allow_all_aps_stop(u32 bsp_apicid)
276 /* Called by the BSP to indicate AP can stop */
278 /* FIXME Do APs use this?
279 Looks like wait_till_sysinfo_in_ram is used instead. */
281 // allow aps to stop use 6 bits for state
/* Broadcast state 0x14 ("may stop") tagged with the BSP's APIC ID. */
282 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x14);
/* Turn on extended (8-bit) APIC ID support in the node's HT transaction
 * control register (F0x68). */
285 static void enable_apic_ext_id(u32 node)
289 val = pci_read_config32(NODE_HT(node), 0x68);
290 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
291 pci_write_config32(NODE_HT(node), 0x68, val);
/* Tear down Cache-As-RAM and park this core.
 * NOTE(review): the stop-this-cpu portion of the body is missing from this
 * listing; only the CAR teardown call is visible. */
295 static void STOP_CAR_AND_CPU()
297 disable_cache_as_ram(); // inline
302 #ifndef MEM_TRAIN_SEQ
303 #define MEM_TRAIN_SEQ 0
/* Early per-core CPU init: enable MMCONFIG, determine node/core IDs,
 * optionally lift APIC IDs, mark the core started, and (on APs) perform
 * FID/VID setup before waiting for the BSP. Returns the (possibly lifted)
 * BSP APIC ID — NOTE(review): the return statement itself is missing from
 * this listing, as are many interior lines (#else/#endif pairs, apicid
 * read, soft_reset call, AP MSR setup, STOP_CAR_AND_CPU call). */
306 #if RAMINIT_SYSINFO == 1
307 static u32 init_cpus(u32 cpu_init_detectedx ,struct sys_info *sysinfo)
309 static u32 init_cpus(u32 cpu_init_detectedx)
314 struct node_core_id id;
317 * already set early mtrr in cache_as_ram.inc
320 /* enable access pci conf via mmio*/
321 set_pci_mmio_conf_reg();
323 /* that is from initial apicid, we need nodeid and coreid
325 id = get_node_core_id_x();
327 /* NB_CFG MSR is shared between cores, so we need make sure
328 core0 is done at first --- use wait_all_core0_started */
330 set_apicid_cpuid_lo(); /* only set it on core0 */
331 set_EnableCf8ExtCfg(); /* only set it on core0 */
332 #if (ENABLE_APIC_EXT_ID == 1)
333 enable_apic_ext_id(id.nodeid);
340 #if (ENABLE_APIC_EXT_ID == 1) && (APIC_ID_OFFSET > 0)
341 u32 initial_apicid = get_initial_apicid();
343 #if LIFT_BSP_APIC_ID == 0
344 if( initial_apicid != 0 ) // other than bsp
347 /* use initial apic id to lift it */
348 u32 dword = lapic_read(LAPIC_ID);
349 dword &= ~(0xff << 24);
350 dword |= (((initial_apicid + APIC_ID_OFFSET) & 0xff) << 24);
352 lapic_write(LAPIC_ID, dword);
355 #if LIFT_BSP_APIC_ID == 1
356 bsp_apicid += APIC_ID_OFFSET;
361 /* get the apicid, it may be lifted already */
364 // show our apicid, nodeid, and coreid
366 if (id.nodeid!=0) //all core0 except bsp
367 print_apicid_nodeid_coreid(apicid, id, " core0: ");
369 else { //all other cores
370 print_apicid_nodeid_coreid(apicid, id, " corex: ");
374 if (cpu_init_detectedx) {
375 print_apicid_nodeid_coreid(apicid, id, "\n\n\nINIT detected from ");
376 print_debug("\nIssuing SOFT_RESET...\n");
381 if(!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
382 distinguish_cpu_resets(id.nodeid); // Also indicates we are started
385 // Mark the core as started.
386 lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
389 if(apicid != bsp_apicid) {
390 /* Setup each AP's cores MSRs.
391 * This happens after HTinit.
392 * The BSP runs this code in its own path.
397 #if FAM10_SET_FIDVID == 1
398 #if (CONFIG_LOGICAL_CPUS == 1) && (FAM10_SET_FIDVID_CORE0_ONLY == 1)
399 // Run on all AP for proper FID/VID setup.
400 if(id.coreid == 0 ) // only need set fid for core0
403 // check warm(bios) reset to call stage2 otherwise do stage1
404 if (warm_reset_detect(id.nodeid)) {
405 printk_debug("init_fidvid_stage2 apicid: %02x\n", apicid);
406 init_fidvid_stage2(apicid, id.nodeid);
408 printk_debug("init_fidvid_ap(stage1) apicid: %02x\n", apicid);
409 init_fidvid_ap(bsp_apicid, apicid, id.nodeid, id.coreid);
414 /* AP is ready, Wait for the BSP to get memory configured */
415 /* FIXME: many cores spinning on node0 pci register seems to be bad.
416 * Why do we need to wait? These APs are just going to go sit in a hlt.
418 //wait_till_sysinfo_in_ram();
420 set_init_ram_access();
423 printk_debug("\nAP %02x should be halted but you are reading this....\n", apicid);
/* Return nonzero once node's core0 has signalled start via the ColdReset
 * Detect bit in HT_INIT_CONTROL (set by distinguish_cpu_resets()).
 * NOTE(review): the return line is missing from this listing. */
430 static u32 is_core0_started(u32 nodeid)
434 device = NODE_PCI(nodeid, 0);
435 htic = pci_read_config32(device, HT_INIT_CONTROL);
436 htic &= HTIC_ColdR_Detect;
441 static void wait_all_core0_started(void)
443 /* When core0 is started, it will distinguish_cpu_resets
444 . So wait for that to finish */
446 u32 nodes = get_nodes();
448 printk_debug("Wait all core0s started \n");
449 for(i=1;i<nodes;i++) { // skip bsp, because it is running on bsp
450 while(!is_core0_started(i)) {}  /* busy-wait; no timeout */
451 print_initcpu8(" Core0 started on node: ", i);
453 printk_debug("Wait all core0s started done\n");
455 #if CONFIG_MAX_PHYSICAL_CPUS > 1
457 * void start_node(u32 node)
459 * start the core0 in node, so it can generate HT packet to feature code.
461 * This function starts the AP nodes core0s. wait_all_core0_started() in
462 * cache_as_ram_auto.c waits for all the AP to be finished before continuing
465 static void start_node(u8 node)
469 /* Enable routing table */
470 printk_debug("Start node %02x", node);
473 /* For CAR_FAM10 support, we need to set Dram base/limit for the new node */
474 pci_write_config32(NODE_MP(node), 0x44, 0);
475 pci_write_config32(NODE_MP(node), 0x40, 3);
478 /* Allow APs to make requests (ROM fetch) */
479 val=pci_read_config32(NODE_HT(node), 0x6c);
/* NOTE(review): the line that clears the request-disable bit in val is
 * missing from this listing. */
481 pci_write_config32(NODE_HT(node), 0x6c, val);
483 printk_debug(" done.\n");
488 * static void setup_remote_node(u32 node)
490 * Copy the BSP Address Map to each AP.
492 static void setup_remote_node(u8 node)
494 /* These registers can be used with F1x114_x Address Map at the
495 same time, So must set them even 32 node */
496 static const u16 pci_reg[] = {
497 /* DRAM Base/Limits Registers */
498 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
499 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
500 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
501 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
502 /* MMIO Base/Limits Registers */
503 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
504 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
505 /* IO Base/Limits Registers */
506 0xc4, 0xcc, 0xd4, 0xdc,
507 0xc0, 0xc8, 0xd0, 0xd8,
508 /* Configuration Map Registers */
509 0xe0, 0xe4, 0xe8, 0xec,
513 printk_debug("setup_remote_node: %02x", node);
515 /* copy the default resource map from node 0 */
516 for(i = 0; i < sizeof(pci_reg)/sizeof(pci_reg[0]); i++) {
520 value = pci_read_config32(NODE_MP(0), reg);
521 pci_write_config32(NODE_MP(node), reg, value);
524 printk_debug(" done\n");
526 #endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
/* Erratum 281 workaround: adjust XCS token counts on ganged links for
 * pre-B0/B1 revisions or mixed-revision systems (server platforms).
 * NOTE(review): local declarations, the mixed flag set, several mask/value
 * lines between reads and writes, and closing braces are missing from this
 * listing. */
528 void AMD_Errata281(u8 node, u32 revision, u32 platform)
530 /* Workaround for Transaction Scheduling Conflict in
531 * Northbridge Cross Bar. Implement XCS Token adjustment
532 * for ganged links. Also, perform fix up for the mixed
539 u8 nodes = get_nodes();
541 if (platform & AMD_PTYPE_SVR) {
542 /* For each node we need to check for a "broken" node */
543 if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
544 for (i = 0; i < nodes; i++) {
545 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1)) {
552 if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
554 /* F0X68[22:21] DsNpReqLmt0 = 01b */
555 val = pci_read_config32(NODE_PCI(node, 0), 0x68);
558 pci_write_config32(NODE_PCI(node, 0), 0x68, val);
561 val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
564 pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
567 val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
570 pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
572 /* F3X144[3:0] RspTok = 0001b */
573 val = pci_read_config32(NODE_PCI(node, 3), 0x144);
576 pci_write_config32(NODE_PCI(node, 3), 0x144, val);
/* Apply the same token fix-up to F3x148/14C/150 (one per link) */
578 for (i = 0; i < 3; i++) {
579 reg = 0x148 + (i * 4);
580 val = pci_read_config32(NODE_PCI(node, 3), reg);
583 pci_write_config32(NODE_PCI(node, 3), reg, val);
/* Erratum 298 workaround (L2 eviction during Accessed/Dirty bit set):
 * on affected B0/B1/B2 parts set TlbCacheDis and TlbForceMemTypeUc, and
 * publish the OS Visible Workaround (OSVW) MSRs; on B3-only systems just
 * report the workaround as not needed via OSVW.
 * NOTE(review): the affectedRev flag setup, MSR addresses for the rdmsr
 * preceding lines 611/615, and closing braces are missing from this
 * listing. */
590 void AMD_Errata298(void)
592 /* Workaround for L2 Eviction May Occur during operation to
593 * set Accessed or dirty bit.
599 u8 nodes = get_nodes();
601 /* For each core we need to check for a "broken" node */
602 for (i = 0; i < nodes; i++) {
603 if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
611 msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
615 msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
618 msr = rdmsr(OSVW_ID_Length);
619 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
620 wrmsr(OSVW_ID_Length, msr);
622 msr = rdmsr(OSVW_Status);
623 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
624 wrmsr(OSVW_Status, msr);
627 if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
628 msr = rdmsr(OSVW_ID_Length);
629 msr.lo |= 0x01; /* OS Visible Workaround - MSR */
630 wrmsr(OSVW_ID_Length, msr);
/* Map the build-time SYSTEM_TYPE to an AMD_PTYPE_* platform mask used by
 * the MSR/PCI/HT-phy default tables.
 * NOTE(review): the switch case labels, ret declaration, and return are
 * missing from this listing. */
636 u32 get_platform_type(void)
640 switch(SYSTEM_TYPE) {
642 ret |= AMD_PTYPE_DSK;
645 ret |= AMD_PTYPE_MOB;
648 ret |= AMD_PTYPE_SVR;
654 /* FIXME: add UMA support. */
656 /* All Fam10 are multi core */
664 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
665 * HT Phy operations are not valid on links that aren't present, so this
666 * prevents invalid accesses.
668 * Returns the offset of the link register.
670 BOOL AMD_CpuFindCapability (u8 node, u8 cap_count, u8 *offset)
674 /* get start of CPU HT Host Capabilities */
675 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
680 /* Traverse through the capabilities. */
682 val = pci_read_config32(NODE_PCI(node, 0), val);
683 /* Is the capability block a HyperTransport capability block? */
684 if ((val & 0xFF) == 0x08)
685 /* Is the HT capability block an HT Host Capability? */
686 if ((val & 0xE0000000) == (1 << 29))
/* Follow the next-capability pointer (bits 15:8) */
688 val = (val >> 8) & 0xFF;
689 } while (cap_count && val);
693 /* If requested capability found val != 0 */
702 * AMD_checkLinkType - Compare desired link characteristics using a logical
705 * Returns the link characteristic mask.
707 u32 AMD_checkLinkType (u8 node, u8 link, u8 regoff)
712 /* Check coherency */
713 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
717 linktype |= HTPHY_LINKTYPE_COHERENT;
720 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
/* Link frequency field: > 6 encodes an HT3-class frequency */
723 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
725 if (((val >> 8) & 0x0F) > 6)
726 linktype |= HTPHY_LINKTYPE_HT3;
728 linktype |= HTPHY_LINKTYPE_HT1;
/* Ganged/unganged state from F0x170 + 4*link */
732 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
735 linktype |= HTPHY_LINKTYPE_GANGED;
737 linktype |= HTPHY_LINKTYPE_UNGANGED;
744 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
745 * a phy setting for that link.
747 void AMD_SetHtPhyRegister (u8 node, u8 link, u8 entry)
753 /* Determine this link's portal */
/* Each link has an 8-byte portal window starting at F4x180 */
757 phyBase = ((u32)link << 3) | 0x180;
760 /* Get the portal control register's initial value
761 * and update it to access the desired phy register
763 phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
765 if (fam10_htphy_default[entry].htreg > 0x1FF) {
766 phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
767 phyReg |= HTPHY_DIRECT_MAP;
769 phyReg &= ~HTPHY_OFFSET_MASK;
772 /* Now get the current phy register data
773 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
775 phyReg |= fam10_htphy_default[entry].htreg;
776 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Spin until the portal signals read completion (no timeout) */
779 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
780 } while (!(val & HTPHY_IS_COMPLETE_MASK));
782 /* Now we have the phy register data, apply the change */
783 val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
784 val &= ~fam10_htphy_default[entry].mask;
785 val |= fam10_htphy_default[entry].data;
786 pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
788 /* write it through the portal to the phy
789 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
791 phyReg |= HTPHY_WRITE_CMD;
792 pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
/* Spin until the portal signals write completion (no timeout) */
795 val = pci_read_config32(NODE_PCI(node, 4), phyBase);
796 } while (!(val & HTPHY_IS_COMPLETE_MASK));
800 void cpuSetAMDMSR(void)
802 /* This routine loads the CPU with default settings in fam10_msr_default
803 * table . It must be run after Cache-As-RAM has been enabled, and
804 * Hypertransport initialization has taken place. Also note
805 * that it is run on the current processor only, and only for the current
810 u32 revision, platform;
812 printk_debug("cpuSetAMDMSR ");
814 revision = mctGetLogicalCPUID(0xFF);
815 platform = get_platform_type();
/* Apply each table entry whose revision and platform masks match */
817 for(i = 0; i < sizeof(fam10_msr_default)/sizeof(fam10_msr_default[0]); i++) {
818 if ((fam10_msr_default[i].revision & revision) &&
819 (fam10_msr_default[i].platform & platform)) {
820 msr = rdmsr(fam10_msr_default[i].msr);
821 msr.hi &= ~fam10_msr_default[i].mask_hi;
822 msr.hi |= fam10_msr_default[i].data_hi;
823 msr.lo &= ~fam10_msr_default[i].mask_lo;
824 msr.lo |= fam10_msr_default[i].data_lo;
825 wrmsr(fam10_msr_default[i].msr, msr);
830 printk_debug(" done\n");
834 void cpuSetAMDPCI(u8 node)
836 /* This routine loads the CPU with default settings in fam10_pci_default
837 * table . It must be run after Cache-As-RAM has been enabled, and
838 * Hypertransport initialization has taken place. Also note
839 * that it is run for the first core on each node
842 u32 revision, platform;
846 printk_debug("cpuSetAMDPCI %02d", node);
848 revision = mctGetLogicalCPUID(node);
849 platform = get_platform_type();
/* First pass: apply masked PCI config defaults that match rev/platform */
851 for(i = 0; i < sizeof(fam10_pci_default)/sizeof(fam10_pci_default[0]); i++) {
852 if ((fam10_pci_default[i].revision & revision) &&
853 (fam10_pci_default[i].platform & platform)) {
854 val = pci_read_config32(NODE_PCI(node,
855 fam10_pci_default[i].function),
856 fam10_pci_default[i].offset);
857 val &= ~fam10_pci_default[i].mask;
858 val |= fam10_pci_default[i].data;
859 pci_write_config32(NODE_PCI(node,
860 fam10_pci_default[i].function),
861 fam10_pci_default[i].offset, val);
/* Second pass: HT phy defaults, applied per present HT host link */
865 for(i = 0; i < sizeof(fam10_htphy_default)/sizeof(fam10_htphy_default[0]); i++) {
866 if ((fam10_htphy_default[i].revision & revision) &&
867 (fam10_htphy_default[i].platform & platform)) {
868 /* HT Phy settings either apply to both sublinks or have
869 * separate registers for sublink zero and one, so there
870 * will be two table entries. So, here we only loop
871 * through the sublink zeros in function zero.
873 for (j = 0; j < 4; j++) {
874 if (AMD_CpuFindCapability(node, j, &offset)) {
875 if (AMD_checkLinkType(node, j, offset)
876 & fam10_htphy_default[i].linktype) {
877 AMD_SetHtPhyRegister(node, j, i);
880 /* No more capabilities,
889 /* FIXME: add UMA support and programXbarToSriReg(); */
891 AMD_Errata281(node, revision, platform);
893 /* FIXME: if the dct phy doesn't init correct it needs to reset.
894 if (revision & (AMD_DR_B2 | AMD_DR_B3))
897 printk_debug(" done\n");
901 void cpuInitializeMCA(void)
903 /* Clears Machine Check Architecture (MCA) registers, which power on
904 * containing unknown data, on currently running processor.
905 * This routine should only be executed on initial power on (cold boot),
906 * not across a warm reset because valid data is present at that time.
913 if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
914 msr = rdmsr(MCG_CAP);
915 if (msr.lo & MCG_CTL_P){ /* MCG_CTL_P bit is set? */
/* MCG_CAP[7:0] = bank count; *4 gives the register stride span */
918 msr.lo <<= 2; /* multiply the count by 4 */
919 reg = MC0_STA + msr.lo;
921 for (i=0; i < 4; i++) {
923 reg -=4; /* Touch status regs for each bank */
931 * finalize_node_setup()
933 * Do any additional post HT init
/* NOTE(review): this function continues past the end of the visible
 * listing; the trailing loop bodies and closing braces are not shown. */
936 void finalize_node_setup(struct sys_info *sysinfo)
939 u8 nodes = get_nodes();
942 #if RAMINIT_SYSINFO == 1
943 /* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
944 reg = pci_read_config32(NODE_HT(0), 0x64);
945 sysinfo->sblk = (reg>>8) & 7;
947 sysinfo->nodes = nodes;
948 sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
952 for (i = 0; i < nodes; i++) {
956 #if FAM10_SET_FIDVID == 1
957 // Prep each node for FID/VID setup.
961 #if CONFIG_MAX_PHYSICAL_CPUS > 1
962 /* Skip the BSP, start at node 1 */
963 for(i=1; i<nodes; i++) {
964 setup_remote_node(i);