X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=src%2Fcpu%2Fx86%2Fsmm%2Fsmmrelocate.S;h=18d668c9dd0cfdd5eb367af7ad600a5a3b63844a;hb=8c5b58e7c372d0c1666931040e35fef92ad56c4b;hp=e477830cf1c84863326ba88677756c871740bd83;hpb=cadc54583877db65f33d2db11088d5fae1b77b74;p=coreboot.git

diff --git a/src/cpu/x86/smm/smmrelocate.S b/src/cpu/x86/smm/smmrelocate.S
index e477830cf..18d668c9d 100644
--- a/src/cpu/x86/smm/smmrelocate.S
+++ b/src/cpu/x86/smm/smmrelocate.S
@@ -22,22 +22,29 @@
 // Make sure no stage 2 code is included:
 #define __PRE_RAM__
 
-#if !defined(CONFIG_NORTHBRIDGE_AMD_AMDK8) && !defined(CONFIG_NORTHBRIDGE_AMD_FAM10)
+/* On AMD's platforms we can set SMBASE by writing an MSR */
+#if !CONFIG_NORTHBRIDGE_AMD_AMDK8 && !CONFIG_NORTHBRIDGE_AMD_AMDFAM10
 
 // FIXME: Is this piece of code southbridge specific, or
 // can it be cleaned up so this include is not required?
 // It's needed right now because we get our DEFAULT_PMBASE from
 // here.
-#if defined(CONFIG_SOUTHBRIDGE_INTEL_I82801GX)
+#if CONFIG_SOUTHBRIDGE_INTEL_I82801GX
 #include "../../../southbridge/intel/i82801gx/i82801gx.h"
-#elif defined(CONFIG_SOUTHBRIDGE_INTEL_I82801DX)
+#elif CONFIG_SOUTHBRIDGE_INTEL_I82801DX
 #include "../../../southbridge/intel/i82801dx/i82801dx.h"
-#elif defined(CONFIG_SOUTHBRIDGE_INTEL_SCH)
+#elif CONFIG_SOUTHBRIDGE_INTEL_SCH
 #include "../../../southbridge/intel/sch/sch.h"
 #else
 #error "Southbridge needs SMM handler support."
 #endif
 
+#if CONFIG_SMM_TSEG
+
+#include <cpu/x86/mtrr.h>
+
+#endif /* CONFIG_SMM_TSEG */
+
 #define LAPIC_ID 0xfee00020
 
 .global smm_relocation_start
@@ -47,13 +54,22 @@
 .code16
 
 /**
- * This trampoline code relocates SMBASE to 0xa0000 - ( lapicid * 0x400 )
+ * When starting up, x86 CPUs have their SMBASE set to 0x30000. However,
+ * this is not a good place for the SMM handler to live, so it needs to
+ * be relocated.
+ * Traditionally SMM handlers used to live in the A segment (0xa0000).
+ * With growing SMM handlers, more CPU cores, etc. CPU vendors started
+ * allowing to relocate the handler to the end of physical memory, which
+ * they refer to as TSEG.
+ * This trampoline code relocates SMBASE to base address - ( lapicid * 0x400 )
  *
  * Why 0x400? It is a safe value to cover the save state area per CPU. On
  * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
  * Core 2 CPUs the _documented_ parts of the save state area is 48 bytes
  * bigger, effectively sizing our data structures 0x300 bytes.
  *
+ * Example (with SMM handler living at 0xa0000):
+ *
  * LAPICID	SMBASE		SMM Entry	SAVE STATE
  *    0		0xa0000		0xa8000		0xafd00
  *    1		0x9fc00		0xa7c00		0xaf900
@@ -81,13 +97,7 @@
  * at 0xa8000-0xa8100 (example for core 0). That is not enough.
  *
  * This means we're basically limited to 16 cpu cores before
- * we need to use the TSEG/HSEG for the actual SMM handler plus stack.
- * When we exceed 32 cores, we also need to put SMBASE to TSEG/HSEG.
- *
- * If we figure out the documented values above are safe to use,
- * we could pack the structure above even more, so we could use the
- * scheme to pack save state areas for 63 AMD CPUs or 58 Intel CPUs
- * in the ASEG.
+ * we need to move the SMM handler to TSEG.
  *
  * Note: Some versions of Pentium M need their SMBASE aligned to 32k.
  * On those the above only works for up to 2 cores. But for now we only
@@ -99,6 +109,7 @@ smm_relocation_start:
 	/* Check revision to see if AMD64 style SMM_BASE
 	 *   Intel Core Solo/Duo:  0x30007
 	 *   Intel Core2 Solo/Duo: 0x30100
+	 *   Intel SandyBridge:    0x30101
 	 *   AMD64:                0x3XX64
 	 * This check does not make much sense, unless someone ports
 	 * SMI handling to AMD64 CPUs.
@@ -126,11 +137,53 @@ smm_relocate:
 	movl %ecx, %edx
 	shl $10, %edx
 
+#if CONFIG_SMM_TSEG
+	movl $(TSEG_BAR), %ecx		/* Get TSEG base from PCIE */
+	addr32 movl (%ecx), %eax	/* Save TSEG_BAR in %eax */
+	andl $~1, %eax			/* Remove lock bit */
+#else
 	movl $0xa0000, %eax
+#endif
 	subl %edx, %eax	/* subtract offset, see above */
 
 	addr32 movl %eax, (%ebx)
 
+#if CONFIG_SMM_TSEG
+	/* Check for SMRR capability in MTRRCAP[11] */
+	movl $MTRRcap_MSR, %ecx
+	rdmsr
+	bt $11, %eax
+	jnc skip_smrr
+
+	/* TSEG base */
+	movl $(TSEG_BAR), %ecx		/* Get TSEG base from PCIE */
+	addr32 movl (%ecx), %eax	/* Save TSEG_BAR in %eax */
+	andl $~1, %eax			/* Remove lock bit */
+	movl %eax, %ebx
+
+	/* Set SMRR base address. */
+	movl $SMRRphysBase_MSR, %ecx
+	orl $MTRR_TYPE_WRBACK, %eax
+	xorl %edx, %edx
+	wrmsr
+
+	/* Set SMRR mask. */
+	movl $SMRRphysMask_MSR, %ecx
+	movl $(~(CONFIG_SMM_TSEG_SIZE - 1) | MTRRphysMaskValid), %eax
+	xorl %edx, %edx
+	wrmsr
+
+#if CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE
+	/*
+	 * IED base is top 4M of TSEG
+	 */
+	addl $(CONFIG_SMM_TSEG_SIZE - IED_SIZE), %ebx
+	movl $(0x30000 + 0x8000 + 0x7eec), %eax
+	addr32 movl %ebx, (%eax)
+#endif
+
+skip_smrr:
+#endif
 
 	/* The next section of code is potentially southbridge specific */
 
@@ -152,7 +205,7 @@ smm_relocate:
 
 	/* End of southbridge specific section. */
 
-#if defined(CONFIG_DEBUG_SMM_RELOCATION) && CONFIG_DEBUG_SMM_RELOCATION
+#if CONFIG_DEBUG_SMM_RELOCATION
 	/* print [SMM-x] so we can determine if CPUx went to SMM */
 	movw $CONFIG_TTYS0_BASE, %dx
 	mov $'[', %al
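
The arithmetic in the trampoline comment and in the new SMRR mask setup is easy to check by hand. Below is a minimal standalone C sketch, not part of the patch: the helper name smbase_for_cpu and the 8 MiB TSEG size are made up for illustration. It reproduces the per-core address table from the comment block, assuming the offsets shown there (SMI entry at SMBASE + 0x8000, save state at SMBASE + 0xfd00), and computes the same SMRR mask value the assembly writes to SMRRphysMask_MSR.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the trampoline's math: each core's
 * SMBASE is staggered downward by 0x400 bytes per LAPIC ID so the
 * per-core save state areas do not overlap. */
static uint32_t smbase_for_cpu(uint32_t base, uint32_t lapicid)
{
	return base - lapicid * 0x400;
}

int main(void)
{
	const uint32_t base = 0xa0000;	/* traditional ASEG handler base */

	printf("LAPICID\tSMBASE\t\tSMM Entry\tSAVE STATE\n");
	for (uint32_t lapicid = 0; lapicid < 4; lapicid++) {
		uint32_t smbase = smbase_for_cpu(base, lapicid);
		/* SMI entry point is SMBASE + 0x8000; the save state
		 * area sits near the top of the 64K SMRAM window. */
		printf("%7u\t0x%05x\t\t0x%05x\t\t0x%05x\n",
		       (unsigned)lapicid, (unsigned)smbase,
		       (unsigned)(smbase + 0x8000),
		       (unsigned)(smbase + 0xfd00));
	}

	/* SMRR mask as programmed in the patch: select a naturally
	 * aligned power-of-two region of tseg_size bytes; bit 11 is
	 * the MTRRphysMaskValid bit that enables the range. */
	const uint32_t tseg_size = 8 * 1024 * 1024;	/* assumed TSEG size */
	printf("SMRR mask: 0x%08x\n",
	       (unsigned)(~(tseg_size - 1) | (1u << 11)));

	return 0;
}

For LAPIC IDs 0 through 3 this prints exactly the 0xa0000 / 0xa8000 / 0xafd00 progression from the comment above, and for the assumed 8 MiB TSEG the mask comes out as 0xff800800, matching what ~(CONFIG_SMM_TSEG_SIZE - 1) | MTRRphysMaskValid evaluates to in the assembly.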