X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=src%2Fcpu%2Famd%2Fsmm%2Fsmm_init.c;h=6398688c48c712852f3be104b8f699bf637f65d7;hb=b5b3b3bf8ca3eb5be974b50c05e60d03432173d7;hp=ad1c112ed053b1249ff37d861f511d3f8f8cde75;hpb=a68555f48d06b4c8d55f7e4ca208805bec3d5512;p=coreboot.git

diff --git a/src/cpu/amd/smm/smm_init.c b/src/cpu/amd/smm/smm_init.c
index ad1c112ed..6398688c4 100644
--- a/src/cpu/amd/smm/smm_init.c
+++ b/src/cpu/amd/smm/smm_init.c
@@ -30,94 +30,56 @@
 #include
 #include
 
-#define SMM_BASE_MSR 0xc0010111
-#define SMM_ADDR_MSR 0xc0010112
-#define SMM_MASK_MSR 0xc0010113
-#define SMM_BASE 0xa0000
-
 extern unsigned char _binary_smm_start;
 extern unsigned char _binary_smm_size;
 
-static int smm_handler_copied = 0;
-
 void smm_init(void)
 {
-	msr_t msr;
-
-	msr = rdmsr(HWCR_MSR);
-	if (msr.lo & (1 << 0)) {
-		// This sounds like a bug... ?
-		printk(BIOS_DEBUG, "SMM is still locked from last boot, using old handler.\n");
-		return;
-	}
-
-	/* Only copy SMM handler once, not once per CPU */
-	if (!smm_handler_copied) {
-		msr_t syscfg_orig, mtrr_aseg_orig;
-
-		smm_handler_copied = 1;
-
-		/* Back up MSRs for later restore */
-		syscfg_orig = rdmsr(SYSCFG_MSR);
-		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
-
-		/* MTRR changes don't like an enabled cache */
-		disable_cache();
-
-		msr = syscfg_orig;
-		/* Allow changes to MTRR extended attributes */
-		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-		/* turn the extended attributes off until we fix
-		 * them so A0000 is routed to memory
-		 */
-		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
-		wrmsr(SYSCFG_MSR, msr);
-
-		/* set DRAM access to 0xa0000 */
-		/* A0000 is memory */
-		msr.lo = 0x18181818;
-		msr.hi = 0x18181818;
-		wrmsr(MTRRfix16K_A0000_MSR, msr);
-
-		/* enable the extended features */
-		msr = syscfg_orig;
-		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
-		wrmsr(SYSCFG_MSR, msr);
-
-		enable_cache();
-		/* copy the real SMM handler */
-		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
-		wbinvd();
-
-		/* Restore MTRR */
-		disable_cache();
-
-		/* Restore SYSCFG */
-		wrmsr(SYSCFG_MSR, syscfg_orig);
-
-		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
-		enable_cache();
-	}
-
-
-	/* But set SMM base address on all CPUs/cores */
-	msr = rdmsr(SMM_BASE_MSR);
-	msr.lo = SMM_BASE - (lapicid() * 0x400);
-	wrmsr(SMM_BASE_MSR, msr);
-
-	/* enable the SMM memory window */
-	msr = rdmsr(SMM_MASK_MSR);
-	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
-	wrmsr(SMM_MASK_MSR, msr);
-
-	/* Set SMMLOCK to avoid exploits messing with SMM */
-	msr = rdmsr(HWCR_MSR);
-	msr.lo |= (1 << 0);
-	wrmsr(HWCR_MSR, msr);
+	msr_t msr, syscfg_orig, mtrr_aseg_orig;
+
+	/* Back up MSRs for later restore */
+	syscfg_orig = rdmsr(SYSCFG_MSR);
+	mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
+
+	/* MTRR changes don't like an enabled cache */
+	disable_cache();
+
+	msr = syscfg_orig;
+
+	/* Allow changes to MTRR extended attributes */
+	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
+	/* turn the extended attributes off until we fix
+	 * them so A0000 is routed to memory
+	 */
+	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
+	wrmsr(SYSCFG_MSR, msr);
+
+	/* set DRAM access to 0xa0000 */
+	msr.lo = 0x18181818;
+	msr.hi = 0x18181818;
+	wrmsr(MTRRfix16K_A0000_MSR, msr);
+
+	/* enable the extended features */
+	msr = syscfg_orig;
+	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
+	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
+	wrmsr(SYSCFG_MSR, msr);
+
+	enable_cache();
+	/* copy the real SMM handler */
+	memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
+	wbinvd();
+	disable_cache();
+
+	/* Restore SYSCFG and MTRR */
+	wrmsr(SYSCFG_MSR, syscfg_orig);
+	wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
+	enable_cache();
+
+	/* CPU MSR are set in CPU init */
 }
 
 void smm_lock(void)
 {
-	/* We lock SMM per CPU core */
+	/* We lock SMM in CPU init */
 }