diff --git a/src/cpu/amd/model_fxx/model_fxx_init.c b/src/cpu/amd/model_fxx/model_fxx_init.c
index e2c864f06..ae5429d05 100644
--- a/src/cpu/amd/model_fxx/model_fxx_init.c
+++ b/src/cpu/amd/model_fxx/model_fxx_init.c
@@ -1,56 +1,114 @@
 /* Needed so the AMD K8 runs correctly. */
+/* This should be done by Eric.
+ * 2004.11 yhlu add D0/E0 support
+ * 2004.12 yhlu add dual core support
+ * 2005.02 yhlu add E0 memory hole support
+ *
+ * Copyright 2005 AMD
+ * 2005.08 yhlu add microcode support
+ */
+
 #include 
 #include 
 #include 
 #include 
-#include 
-#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#include "../../../northbridge/amd/amdk8/amdk8.h"
-#include "../../../northbridge/amd/amdk8/cpu_rev.c"
+#include "northbridge/amd/amdk8/amdk8.h"
+#include 
+#include 
 #include 
 #include 
 #include 
-#include 
-#include "model_fxx_msr.h"
+#include 
+#include 
+#include 
 
-#define MCI_STATUS 0x401
+#if CONFIG_WAIT_BEFORE_CPUS_INIT
+void cpus_ready_for_init(void)
+{
+#if CONFIG_MEM_TRAIN_SEQ == 1
+	struct sys_info *sysinfox = (struct sys_info *)((CONFIG_RAMTOP) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE);
+	// wait for the AP memory to be trained
+	wait_all_core0_mem_trained(sysinfox);
+#endif
+}
+#endif
 
-static inline msr_t rdmsr_amd(unsigned index)
+#if CONFIG_K8_REV_F_SUPPORT == 0
+int is_e0_later_in_bsp(int nodeid)
 {
-	msr_t result;
-	__asm__ __volatile__ (
-		"rdmsr"
-		: "=a" (result.lo), "=d" (result.hi)
-		: "c" (index), "D" (0x9c5a203a)
-		);
-	return result;
+	uint32_t val;
+	uint32_t val_old;
+	int e0_later;
+	if (nodeid == 0) { // no need to check node 0; we are running on core0/node0
+		return !is_cpu_pre_e0();
+	}
+	// D0 will be treated as E0 by this method, but on D0 nb_cfg_54 is always 0
+	device_t dev;
+	dev = dev_find_slot(0, PCI_DEVFN(0x18 + nodeid, 2));
+	if (!dev)
+		return 0;
+	val_old = pci_read_config32(dev, 0x80);
+	val = val_old;
+	val |= (1 << 3);
+	pci_write_config32(dev, 0x80, val);
+	val = pci_read_config32(dev, 0x80);
+	e0_later = !!(val & (1 << 3));
+	if (e0_later) { // on pre-E0 parts bit 3 is always 0 and cannot be set
+		pci_write_config32(dev, 0x80, val_old); // restore it
+	}
+
+	return e0_later;
 }
+#endif
 
-static inline void wrmsr_amd(unsigned index, msr_t msr)
+#if CONFIG_K8_REV_F_SUPPORT == 1
+int is_cpu_f0_in_bsp(int nodeid)
 {
-	__asm__ __volatile__ (
-		"wrmsr"
-		: /* No outputs */
-		: "c" (index), "a" (msr.lo), "d" (msr.hi), "D" (0x9c5a203a)
-		);
+	uint32_t dword;
+	device_t dev;
+	dev = dev_find_slot(0, PCI_DEVFN(0x18 + nodeid, 3));
+	dword = pci_read_config32(dev, 0xfc);
+	return (dword & 0xfff00) == 0x40f00;
 }
+#endif
+
+#define MCI_STATUS 0x401
+
+static inline msr_t rdmsr_amd(u32 index)
+{
+	msr_t result;
+	__asm__ __volatile__(
+		"rdmsr"
+		:"=a"(result.lo), "=d"(result.hi)
+		:"c"(index), "D"(0x9c5a203a)
+	);
+	return result;
+}
+
+static inline void wrmsr_amd(u32 index, msr_t msr)
+{
+	__asm__ __volatile__(
+		"wrmsr"
+		: /* No outputs */
+		:"c"(index), "a"(msr.lo), "d"(msr.hi), "D"(0x9c5a203a)
+	);
+}
 
 #define MTRR_COUNT 8
-#define ZERO_CHUNK_KB 0x800UL /* 2M */
+#define ZERO_CHUNK_KB 0x800UL	/* 2M */
 #define TOLM_KB 0x400000UL
 
 struct mtrr {
 	msr_t base;
 	msr_t mask;
 };
+
 struct mtrr_state {
 	struct mtrr mtrrs[MTRR_COUNT];
 	msr_t top_mem, top_mem2;
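[Note] rdmsr_amd()/wrmsr_amd() above differ from plain rdmsr/wrmsr only in loading EDI with 0x9c5a203a, the "password" the K8 BKDG requires for accessing certain AMD-specific MSRs (DC_CFG, BU_CFG, and friends). A minimal usage sketch, mirroring the erratum 81 workaround applied later in this patch:

	msr_t m = rdmsr_amd(DC_CFG_MSR);	/* EDI carries the 0x9c5a203a key */
	m.lo |= (1 << 10);			/* erratum workaround bit */
	wrmsr_amd(DC_CFG_MSR, m);
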
@@ -60,11 +118,11 @@ struct mtrr_state {
 static void save_mtrr_state(struct mtrr_state *state)
 {
 	int i;
-	for(i = 0; i < MTRR_COUNT; i++) {
+	for (i = 0; i < MTRR_COUNT; i++) {
 		state->mtrrs[i].base = rdmsr(MTRRphysBase_MSR(i));
 		state->mtrrs[i].mask = rdmsr(MTRRphysMask_MSR(i));
 	}
-	state->top_mem = rdmsr(TOP_MEM);
+	state->top_mem = rdmsr(TOP_MEM);
 	state->top_mem2 = rdmsr(TOP_MEM2);
 	state->def_type = rdmsr(MTRRdefType_MSR);
 }
@@ -74,34 +132,33 @@ static void restore_mtrr_state(struct mtrr_state *state)
 	int i;
 	disable_cache();
 
-	for(i = 0; i < MTRR_COUNT; i++) {
+	for (i = 0; i < MTRR_COUNT; i++) {
 		wrmsr(MTRRphysBase_MSR(i), state->mtrrs[i].base);
 		wrmsr(MTRRphysMask_MSR(i), state->mtrrs[i].mask);
 	}
-	wrmsr(TOP_MEM, state->top_mem);
-	wrmsr(TOP_MEM2, state->top_mem2);
+	wrmsr(TOP_MEM, state->top_mem);
+	wrmsr(TOP_MEM2, state->top_mem2);
 	wrmsr(MTRRdefType_MSR, state->def_type);
 	enable_cache();
 }
 
-
 #if 0
 static void print_mtrr_state(struct mtrr_state *state)
 {
 	int i;
-	for(i = 0; i < MTRR_COUNT; i++) {
-		printk_debug("var mtrr %d: %08x%08x mask: %08x%08x\n",
-			i,
-			state->mtrrs[i].base.hi, state->mtrrs[i].base.lo,
-			state->mtrrs[i].mask.hi, state->mtrrs[i].mask.lo);
+	for (i = 0; i < MTRR_COUNT; i++) {
+		printk(BIOS_DEBUG, "var mtrr %d: %08x%08x mask: %08x%08x\n",
+		       i,
+		       state->mtrrs[i].base.hi, state->mtrrs[i].base.lo,
+		       state->mtrrs[i].mask.hi, state->mtrrs[i].mask.lo);
 	}
-	printk_debug("top_mem: %08x%08x\n",
-		state->top_mem.hi, state->top_mem.lo);
-	printk_debug("top_mem2: %08x%08x\n",
-		state->top_mem2.hi, state->top_mem2.lo);
-	printk_debug("def_type: %08x%08x\n",
-		state->def_type.hi, state->def_type.lo);
+	printk(BIOS_DEBUG, "top_mem: %08x%08x\n",
+	       state->top_mem.hi, state->top_mem.lo);
+	printk(BIOS_DEBUG, "top_mem2: %08x%08x\n",
+	       state->top_mem2.hi, state->top_mem2.lo);
+	printk(BIOS_DEBUG, "def_type: %08x%08x\n",
+	       state->def_type.hi, state->def_type.lo);
 }
 #endif
 
@@ -112,7 +169,7 @@ static void set_init_ecc_mtrrs(void)
 	disable_cache();
 
 	/* First clear all of the msrs to be safe */
-	for(i = 0; i < MTRR_COUNT; i++) {
+	for (i = 0; i < MTRR_COUNT; i++) {
 		msr_t zero;
 		zero.lo = zero.hi = 0;
 		wrmsr(MTRRphysBase_MSR(i), zero);
@@ -124,7 +181,7 @@ static void set_init_ecc_mtrrs(void)
 	msr.lo = 0x00000000 | MTRR_TYPE_WRBACK;
 	wrmsr(MTRRphysBase_MSR(0), msr);
 	msr.hi = 0x000000ff;
-	msr.lo = ~((CONFIG_LB_MEM_TOPK << 10) - 1) | 0x800;
+	msr.lo = ~((CONFIG_RAMTOP) - 1) | 0x800;
 	wrmsr(MTRRphysMask_MSR(0), msr);
 
 	/* Set the default type to write combining */
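[Note] In the hunk above, the variable-MTRR mask is built as ~(region_size - 1) with bit 11 (0x800) set as the mask-valid bit, while msr.hi = 0xff extends the match to address bits 39:32. A sketch of the encoding for a power-of-two window at physical address 0 (helper name hypothetical):

	/* Encode MTRRphysMask for a 'size'-byte window; size must be a
	 * power of two no larger than 4GB. Bit 11 marks the pair valid. */
	static msr_t mtrr_mask_for_size(uint32_t size)
	{
		msr_t m;
		m.hi = 0x000000ff;		/* match address bits 39:32 */
		m.lo = ~(size - 1) | 0x800;	/* 0x800 = valid bit */
		return m;
	}

For example, CONFIG_RAMTOP = 2M yields the mask 0xffe00800.
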
@@ -140,19 +197,58 @@ static void set_init_ecc_mtrrs(void)
 	enable_cache();
 }
 
+static inline void clear_2M_ram(unsigned long basek,
+				struct mtrr_state *mtrr_state)
+{
+	unsigned long limitk;
+	unsigned long size;
+	void *addr;
+
+	/* Report every 64M */
+	if ((basek % (64 * 1024)) == 0) {
 
-static void init_ecc_memory(void)
+		/* Restore the normal state */
+		map_2M_page(0);
+		restore_mtrr_state(mtrr_state);
+		enable_lapic();
+
+		/* Print a status message */
+		printk(BIOS_DEBUG, "%c", (basek >= TOLM_KB) ? '+' : '-');
+
+		/* Return to the initialization state */
+		set_init_ecc_mtrrs();
+		disable_lapic();
+
+	}
+
+	limitk = (basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1);
+#if 0
+	/* This cannot happen: memory is always on a 2M boundary. */
+	if (limitk > endk) {
+		limitk = endk;
+	}
+#endif
+	size = (limitk - basek) << 10;
+	addr = map_2M_page(basek >> 11);
+	if (addr == MAPPING_ERROR) {
+		printk(BIOS_ERR, "Cannot map page: %lx\n", basek >> 11);
+		return;
+	}
+
+	/* Clear the (limitk - basek) KB in this 2M window */
+	addr = (void *)(((uint32_t) addr) | ((basek & 0x7ff) << 10));
+	memset(addr, 0, size);
+}
+
+static void init_ecc_memory(unsigned node_id)
 {
 	unsigned long startk, begink, endk;
 	unsigned long basek;
 	struct mtrr_state mtrr_state;
+
 	device_t f1_dev, f2_dev, f3_dev;
-	int node_id;
 	int enable_scrubbing;
 	uint32_t dcl;
-
-	/* For now there is a 1-1 mapping between node_id and cpu_id */
-	node_id = lapicid();
 
 	f1_dev = dev_find_slot(0, PCI_DEVFN(0x18 + node_id, 1));
 	if (!f1_dev) {
@@ -169,34 +265,60 @@ static void init_ecc_memory(void)
 
 	/* See if scrubbing should be enabled */
 	enable_scrubbing = 1;
-	get_option(&enable_scrubbing, "hw_scrubber");
+	if (get_option(&enable_scrubbing, "hw_scrubber") < 0) {
+		enable_scrubbing = CONFIG_HW_SCRUBBER;
+	}
 
 	/* Enable cache scrubbing at the lowest possible rate */
 	if (enable_scrubbing) {
 		pci_write_config32(f3_dev, SCRUB_CONTROL,
-			(SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_NONE << 0));
+				   (SCRUB_84ms << 16) | (SCRUB_84ms << 8) |
+				   (SCRUB_NONE << 0));
 	} else {
 		pci_write_config32(f3_dev, SCRUB_CONTROL,
-			(SCRUB_NONE << 16) | (SCRUB_NONE << 8) | (SCRUB_NONE << 0));
-		printk_debug("Scrubbing Disabled\n");
+				   (SCRUB_NONE << 16) | (SCRUB_NONE << 8) |
+				   (SCRUB_NONE << 0));
+		printk(BIOS_DEBUG, "Scrubbing Disabled\n");
 	}
 
-	/* If ecc support is not enabled don't touch memory */
 	dcl = pci_read_config32(f2_dev, DRAM_CONFIG_LOW);
 	if (!(dcl & DCL_DimmEccEn)) {
+		printk(BIOS_DEBUG, "ECC Disabled\n");
 		return;
 	}
 
-	startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
-	endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;
+	startk =
+	    (pci_read_config32(f1_dev, 0x40 + (node_id * 8)) & 0xffff0000) >> 2;
+	endk =
+	    ((pci_read_config32(f1_dev, 0x44 + (node_id * 8)) & 0xffff0000) >>
+	     2) + 0x4000;
+
+#if CONFIG_HW_MEM_HOLE_SIZEK != 0
+	unsigned long hole_startk = 0;
+
+#if CONFIG_K8_REV_F_SUPPORT == 0
+	if (!is_cpu_pre_e0()) {
+#endif
+
+		uint32_t val;
+		val = pci_read_config32(f1_dev, 0xf0);
+		if (val & 1) {
+			hole_startk = ((val & (0xff << 24)) >> 10);
+		}
+#if CONFIG_K8_REV_F_SUPPORT == 0
+	}
+#endif
+#endif
 
 	/* Don't start too early */
 	begink = startk;
-	if (begink < CONFIG_LB_MEM_TOPK) {
-		begink = CONFIG_LB_MEM_TOPK;
+	if (begink < (CONFIG_RAMTOP >> 10)) {
+		begink = (CONFIG_RAMTOP >> 10);
 	}
-	printk_debug("Clearing memory %uK - %uK: ", startk, endk);
+
+	printk(BIOS_DEBUG, "Clearing memory %luK - %luK: ", begink, endk);
 
 	/* Save the normal state */
 	save_mtrr_state(&mtrr_state);
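[Note] The memory-hole probe in the hunk above reads the DRAM Hole Address Register (function 1, offset 0xf0): bit 0 indicates a hole is enabled and bits 31:24 hold the hole's base address bits. Shifting the masked value right by 10 converts bytes to KB. Worked example (register value illustrative):

	uint32_t val = 0xe0000001;	/* hole based at 0xe0000000, enable bit set */
	unsigned long hole_startk = (val & (0xff << 24)) >> 10;
	/* = 0xe0000000 >> 10 = 0x380000 KB, i.e. 3.5GB */
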
@@ -206,61 +328,49 @@ static void init_ecc_memory(void)
 	disable_lapic();
 
 	/* Walk through 2M chunks and zero them */
-	for(basek = begink; basek < endk; basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1))) {
-		unsigned long limitk;
-		unsigned long size;
-		void *addr;
-
-		/* Report every 64M */
-		if ((basek % (64*1024)) == 0) {
-			/* Restore the normal state */
-			map_2M_page(0);
-			restore_mtrr_state(&mtrr_state);
-			enable_lapic();
-
-			/* Print a status message */
-			printk_debug("%c", (basek >= TOLM_KB)?'+':'-');
-
-			/* Return to the initialization state */
-			set_init_ecc_mtrrs();
-			disable_lapic();
+#if CONFIG_HW_MEM_HOLE_SIZEK != 0
+	/* hole_startk can never be equal to begink here; hole_startk is also
+	 * on a 2M boundary (64M?). */
+	if ((hole_startk != 0)
+	    && ((begink < hole_startk) && (endk > (4 * 1024 * 1024)))) {
+		for (basek = begink; basek < hole_startk;
+		     basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1))) {
+			clear_2M_ram(basek, &mtrr_state);
 		}
-		limitk = (basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1);
-		if (limitk > endk) {
-			limitk = endk;
+		for (basek = 4 * 1024 * 1024; basek < endk;
+		     basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1))) {
+			clear_2M_ram(basek, &mtrr_state);
 		}
-		size = (limitk - basek) << 10;
-		addr = map_2M_page(basek >> 11);
-		addr = (void *)(((uint32_t)addr) | ((basek & 0x7ff) << 10));
-		if (addr == MAPPING_ERROR) {
-			continue;
+	} else
+#endif
+		for (basek = begink; basek < endk;
+		     basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1))) {
+			clear_2M_ram(basek, &mtrr_state);
 		}
-		/* clear memory 2M (limitk - basek) */
-		clear_memory(addr, size);
-	}
 
 	/* Restore the normal state */
 	map_2M_page(0);
 	restore_mtrr_state(&mtrr_state);
 	enable_lapic();
 
 	/* Set the scrub base address registers */
-	pci_write_config32(f3_dev, SCRUB_ADDR_LOW, startk << 10);
+	pci_write_config32(f3_dev, SCRUB_ADDR_LOW, startk << 10);
 	pci_write_config32(f3_dev, SCRUB_ADDR_HIGH, startk >> 22);
 
 	/* Enable the scrubber? */
 	if (enable_scrubbing) {
 		/* Enable scrubbing at the lowest possible rate */
 		pci_write_config32(f3_dev, SCRUB_CONTROL,
-			(SCRUB_84ms << 16) | (SCRUB_84ms << 8) | (SCRUB_84ms << 0));
+				   (SCRUB_84ms << 16) | (SCRUB_84ms << 8) |
+				   (SCRUB_84ms << 0));
 	}
 
-	printk_debug(" done\n");
+	printk(BIOS_DEBUG, " done\n");
 }
 
 static inline void k8_errata(void)
 {
 	msr_t msr;
+#if CONFIG_K8_REV_F_SUPPORT == 0
 	if (is_cpu_pre_c0()) {
 		/* Erratum 63... */
 		msr = rdmsr(HWCR_MSR);
@@ -274,96 +384,277 @@ static inline void k8_errata(void)
 
 	/* Erratum 81... */
 	msr = rdmsr_amd(DC_CFG_MSR);
-	msr.lo |= (1 << 10);
+	msr.lo |= (1 << 10);
 	wrmsr_amd(DC_CFG_MSR, msr);
-
+	}
 
-	/* I can't touch this msr on early buggy cpus */
-	if (!is_cpu_pre_b3()) {
-		/* Erratum 89 ... */
-		msr = rdmsr(NB_CFG_MSR);
+	/* Erratum 97 ... */
+	if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
+		msr = rdmsr_amd(DC_CFG_MSR);
 		msr.lo |= 1 << 3;
-
-		if (!is_cpu_pre_c0()) {
-			/* Erratum 86 Disable data masking on C0 and
+		wrmsr_amd(DC_CFG_MSR, msr);
+	}
+
+	/* Erratum 94 ... */
+	if (is_cpu_pre_d0()) {
+		msr = rdmsr_amd(IC_CFG_MSR);
+		msr.lo |= 1 << 11;
+		wrmsr_amd(IC_CFG_MSR, msr);
+	}
+
+	/* Erratum 91 prefetch miss is handled in the kernel */
+
+	/* Erratum 106 ... */
+	msr = rdmsr_amd(LS_CFG_MSR);
+	msr.lo |= 1 << 25;
+	wrmsr_amd(LS_CFG_MSR, msr);
+
+	/* Erratum 107 ... */
+	msr = rdmsr_amd(BU_CFG_MSR);
+	msr.hi |= 1 << (43 - 32);
+	wrmsr_amd(BU_CFG_MSR, msr);
+
+	/* Erratum 110
+	 * This erratum applies to D0 through E6 revisions;
+	 * revision F and later are unaffected. There are two fixes,
+	 * depending on processor revision.
+	 */
+	if (is_cpu_d0()) {
+		/* Erratum 110 ... */
+		msr = rdmsr_amd(CPU_ID_HYPER_EXT_FEATURES);
+		msr.hi |= 1;
+		wrmsr_amd(CPU_ID_HYPER_EXT_FEATURES, msr);
+	}
+
+	if (!is_cpu_pre_e0()) {
+		/* Erratum 110 ... */
+		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
+		msr.hi |= 1;
+		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
+	}
+#endif
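[Note] msr_t splits a 64-bit MSR into two 32-bit halves, so bits 32-63 are set through msr.hi; that is why erratum 107 above writes 1 << (43 - 32) for BU_CFG bit 43, and the NB_CFG code below writes 1 << (36 - 32) for bit 36. A hypothetical helper making the split explicit:

	static inline void msr_set_bit(msr_t *m, unsigned bit)
	{
		if (bit < 32)
			m->lo |= 1u << bit;
		else
			m->hi |= 1u << (bit - 32);
	}
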
+
+#if CONFIG_K8_REV_F_SUPPORT == 0
+	/* I can't touch this msr on early buggy cpus */
+	if (!is_cpu_pre_b3())
+#endif
+	{
+		msr = rdmsr(NB_CFG_MSR);
+
+#if CONFIG_K8_REV_F_SUPPORT == 0
+		if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
+			/* D0 and later don't need this */
+			/* Erratum 86: Disable data masking on C0 and
 		 * later processor revs.
 		 * FIXME this is only needed if ECC is enabled.
 		 */
 			msr.hi |= 1 << (36 - 32);
-	}
+		}
+#endif
+		/* Erratum 89 ... */
+		/* Erratum 89 is mistakenly labeled as 88 in AMD pub #25759;
+		 * it is correctly labeled as 89 on page 49 of that document
+		 * and in AMD pub #33610.
+		 */
+		msr.lo |= 1 << 3;
+		/* Erratum 169 */
+		/* This supersedes erratum 131; 131 should not be applied with
+		 * 169. We also need to set some bits in the northbridge,
+		 * handled in src/northbridge/amdk8/.
+		 */
+		msr.hi |= 1;
+		wrmsr(NB_CFG_MSR, msr);
 	}
-
-	/* Erratum 97 ... */
-	if (!is_cpu_pre_c0()) {
-		msr = rdmsr_amd(DC_CFG_MSR);
-		msr.lo |= 1 << 3;
-		wrmsr_amd(DC_CFG_MSR, msr);
-	}
-
-	/* Erratum 94 ... */
-	msr = rdmsr_amd(IC_CFG_MSR);
-	msr.lo |= 1 << 11;
-	wrmsr_amd(IC_CFG_MSR, msr);
+	/* Erratum 122 */
+	msr = rdmsr(HWCR_MSR);
+	msr.lo |= 1 << 6;
+	wrmsr(HWCR_MSR, msr);
 
-	/* Erratum 91 prefetch miss is handled in the kernel */
 }
 
-void model_fxx_init(device_t dev)
+#if CONFIG_USBDEBUG
+static unsigned ehci_debug_addr;
+#endif
+
+static void model_fxx_init(device_t dev)
 {
-	unsigned long mmio_basek, tomk;
 	unsigned long i;
 	msr_t msr;
+	struct node_core_id id;
+
+#if CONFIG_USBDEBUG
+	if (!ehci_debug_addr)
+		ehci_debug_addr = get_ehci_debug();
+	set_ehci_debug(0);
+#endif
 
 	/* Turn on caching if we haven't already */
 	x86_enable_cache();
 	amd_setup_mtrrs();
 	x86_mtrr_check();
 
+#if CONFIG_USBDEBUG
+	set_ehci_debug(ehci_debug_addr);
+#endif
+
+	/* Update the microcode */
+	model_fxx_update_microcode(dev->device);
+
 	disable_cache();
-
+
 	/* zero the machine check error status registers */
 	msr.lo = 0;
 	msr.hi = 0;
-	for(i=0; i<5; i++) {
-		wrmsr(MCI_STATUS + (i*4),msr);
+	for (i = 0; i < 5; i++) {
+		wrmsr(MCI_STATUS + (i * 4), msr);
 	}
 
 	k8_errata();
-
+
 	enable_cache();
 
-	/* Is this a bad location? In particular can another node prefetch
-	 * data from this node before we have initialized it?
-	 */
-	init_ecc_memory();
+	/* Set the processor name string */
+	init_processor_name();
 
 	/* Enable the local cpu apics */
 	setup_lapic();
+
+#if CONFIG_LOGICAL_CPUS == 1
+	u32 siblings = cpuid_ecx(0x80000008) & 0xff;
+
+	if (siblings > 0) {
+		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
+		msr.lo |= 1 << 28;
+		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);
+
+		msr = rdmsr_amd(LOGICAL_CPUS_NUM_MSR);
+		msr.lo = (siblings + 1) << 16;
+		wrmsr_amd(LOGICAL_CPUS_NUM_MSR, msr);
+
+		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
+		msr.hi |= 1 << (33 - 32);
+		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
+	}
+#endif
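[Note] In the CONFIG_LOGICAL_CPUS block above, cpuid_ecx(0x80000008) & 0xff is CPUID's NC field, the core count minus one, so 'siblings' is zero on single-core parts. An equivalent probe without the coreboot helper (GCC inline asm, sketch only):

	static unsigned k8_extra_cores(void)
	{
		unsigned eax = 0x80000008, ebx, ecx, edx;
		__asm__ __volatile__("cpuid"
			: "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx));
		return ecx & 0xff;	/* NC: cores per package - 1 */
	}
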
+
+	id = get_node_core_id(read_nb_cfg_54());	// pre-E0: nb_cfg_54 cannot be set
+
+	/* Is this a bad location? In particular, can another node prefetch
+	 * data from this node before we have initialized it?
+	 */
+	if (id.coreid == 0)
+		init_ecc_memory(id.nodeid);	// only do it for core 0
+
+	/* Set SMM base address for this CPU */
+	msr = rdmsr(SMM_BASE_MSR);
+	msr.lo = SMM_BASE - (lapicid() * 0x400);
+	wrmsr(SMM_BASE_MSR, msr);
+
+	/* Enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0);	/* Enable ASEG SMRAM Range */
+	wrmsr(SMM_MASK_MSR, msr);
+
+	/* Set SMMLOCK to avoid exploits messing with SMM */
+	msr = rdmsr(HWCR_MSR);
+	msr.lo |= (1 << 0);
+	wrmsr(HWCR_MSR, msr);
 }
 
 static struct device_operations cpu_dev_ops = {
 	.init = model_fxx_init,
 };
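[Note] The SMM setup above appears to stagger SMBASE by 0x400 per APIC ID so each CPU's SMM save-state area gets its own slot while all CPUs share a single handler. Worked values, assuming the conventional ASEG base of 0xa0000 for SMM_BASE:

	/* lapicid 0 -> SMBASE = 0xa0000                    */
	/* lapicid 1 -> SMBASE = 0xa0000 - 0x400 = 0x9fc00 */
	/* lapicid 2 -> SMBASE = 0xa0000 - 0x800 = 0x9f800 */
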
+ * + * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf + */ + { X86_VENDOR_AMD, 0x40f50 }, /* SH-F0 (socket F/1207) */ + { X86_VENDOR_AMD, 0x40f70 }, /* SH-F0 (socket AM2) */ + { X86_VENDOR_AMD, 0x40f40 }, /* SH-F0 (socket S1g1) */ + { X86_VENDOR_AMD, 0x40f11 }, /* JH-F1 (socket F/1207) */ + { X86_VENDOR_AMD, 0x40f31 }, /* JH-F1 (socket AM2) */ + { X86_VENDOR_AMD, 0x40f01 }, /* JH-F1 (socket S1g1) */ + + { X86_VENDOR_AMD, 0x40f12 }, /* JH-F2 (socket F/1207) */ + { X86_VENDOR_AMD, 0x40f32 }, /* JH-F2 (socket AM2) */ + { X86_VENDOR_AMD, 0x40fb2 }, /* BH-F2 (socket AM2) */ + { X86_VENDOR_AMD, 0x40f82 }, /* BH-F2 (socket S1g1) */ + { X86_VENDOR_AMD, 0x40ff2 }, /* DH-F2 (socket AM2) */ + { X86_VENDOR_AMD, 0x50ff2 }, /* DH-F2 (socket AM2) */ + { X86_VENDOR_AMD, 0x40fc2 }, /* DH-F2 (socket S1g1) */ + { X86_VENDOR_AMD, 0x40f13 }, /* JH-F3 (socket F/1207) */ + { X86_VENDOR_AMD, 0x40f33 }, /* JH-F3 (socket AM2) */ + { X86_VENDOR_AMD, 0x50fd3 }, /* JH-F3 (socket F/1207) */ + { X86_VENDOR_AMD, 0xc0f13 }, /* JH-F3 (socket F/1207) */ + { X86_VENDOR_AMD, 0x50ff3 }, /* DH-F3 (socket AM2) */ + { X86_VENDOR_AMD, 0x60fb1 }, /* BH-G1 (socket AM2) */ + { X86_VENDOR_AMD, 0x60f81 }, /* BH-G1 (socket S1g1) */ + { X86_VENDOR_AMD, 0x60fb2 }, /* BH-G2 (socket AM2) */ + { X86_VENDOR_AMD, 0x60f82 }, /* BH-G2 (socket S1g1) */ + { X86_VENDOR_AMD, 0x70ff1 }, /* DH-G1 (socket AM2) */ + { X86_VENDOR_AMD, 0x60ff2 }, /* DH-G2 (socket AM2) */ + { X86_VENDOR_AMD, 0x70ff2 }, /* DH-G2 (socket AM2) */ + { X86_VENDOR_AMD, 0x60fc2 }, /* DH-G2 (socket S1g1) */ + { X86_VENDOR_AMD, 0x70fc2 }, /* DH-G2 (socket S1g1) */ +#endif + { 0, 0 }, }; -static struct cpu_driver model_fxx __cpu_driver = { + +static const struct cpu_driver model_fxx __cpu_driver = { .ops = &cpu_dev_ops, .id_table = cpu_table, };