X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=src%2Fcpu%2Fx86%2Fsmm%2Fsmmrelocate.S;h=18d668c9dd0cfdd5eb367af7ad600a5a3b63844a;hb=8c5b58e7c372d0c1666931040e35fef92ad56c4b;hp=7b383485f9bb24cfe811795038dd704bb9aaf6bf;hpb=582748fbb35f930f1d40e62dc685b526f4ab74cc;p=coreboot.git

diff --git a/src/cpu/x86/smm/smmrelocate.S b/src/cpu/x86/smm/smmrelocate.S
index 7b383485f..18d668c9d 100644
--- a/src/cpu/x86/smm/smmrelocate.S
+++ b/src/cpu/x86/smm/smmrelocate.S
@@ -39,6 +39,12 @@
 #error "Southbridge needs SMM handler support."
 #endif
 
+#if CONFIG_SMM_TSEG
+
+#include <cpu/x86/mtrr.h>
+
+#endif /* CONFIG_SMM_TSEG */
+
 #define LAPIC_ID	0xfee00020
 
 .global smm_relocation_start
@@ -48,13 +54,22 @@
 .code16
 
 /**
- * This trampoline code relocates SMBASE to 0xa0000 - ( lapicid * 0x400 )
+ * When starting up, x86 CPUs have their SMBASE set to 0x30000. However,
+ * this is not a good place for the SMM handler to live, so it needs to
+ * be relocated.
+ * Traditionally SMM handlers used to live in the A segment (0xa0000).
+ * With growing SMM handlers, more CPU cores, etc., CPU vendors started
+ * allowing the handler to be relocated to the end of physical memory,
+ * a region they refer to as TSEG.
+ * This trampoline code relocates SMBASE to the base address - (lapicid * 0x400)
  *
  * Why 0x400? It is a safe value to cover the save state area per CPU. On
  * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
  * Core 2 CPUs the _documented_ parts of the save state area is 48 bytes
  * bigger, effectively sizing our data structures 0x300 bytes.
  *
+ * Example (with SMM handler living at 0xa0000):
+ *
  * LAPICID   SMBASE    SMM Entry   SAVE STATE
  *    0      0xa0000   0xa8000     0xafd00
  *    1      0x9fc00   0xa7c00     0xaf900
@@ -82,13 +97,7 @@
  * at 0xa8000-0xa8100 (example for core 0). That is not enough.
  *
  * This means we're basically limited to 16 cpu cores before
- * we need to use the TSEG/HSEG for the actual SMM handler plus stack.
- * When we exceed 32 cores, we also need to put SMBASE to TSEG/HSEG.
- *
- * If we figure out the documented values above are safe to use,
- * we could pack the structure above even more, so we could use the
- * scheme to pack save state areas for 63 AMD CPUs or 58 Intel CPUs
- * in the ASEG.
+ * we need to move the SMM handler to TSEG.
  *
  * Note: Some versions of Pentium M need their SMBASE aligned to 32k.
  * On those the above only works for up to 2 cores. But for now we only
@@ -100,6 +109,7 @@ smm_relocation_start:
 	/* Check revision to see if AMD64 style SMM_BASE
 	 *   Intel Core Solo/Duo:  0x30007
 	 *   Intel Core2 Solo/Duo: 0x30100
+	 *   Intel SandyBridge:    0x30101
 	 *   AMD64:                0x3XX64
 	 * This check does not make much sense, unless someone ports
 	 * SMI handling to AMD64 CPUs.
@@ -127,11 +137,53 @@ smm_relocate:
 	movl %ecx, %edx
 	shl $10, %edx
 
+#if CONFIG_SMM_TSEG
+	movl $(TSEG_BAR), %ecx		/* Get TSEG base from PCIE */
+	addr32 movl (%ecx), %eax	/* Save TSEG_BAR in %eax */
+	andl $~1, %eax			/* Remove lock bit */
+#else
 	movl $0xa0000, %eax
+#endif
 	subl %edx, %eax	/* subtract offset, see above */
 	addr32 movl %eax, (%ebx)
 
+#if CONFIG_SMM_TSEG
+	/* Check for SMRR capability in MTRRCAP[11] */
+	movl $MTRRcap_MSR, %ecx
+	rdmsr
+	bt $11, %eax
+	jnc skip_smrr
+
+	/* TSEG base */
+	movl $(TSEG_BAR), %ecx		/* Get TSEG base from PCIE */
+	addr32 movl (%ecx), %eax	/* Save TSEG_BAR in %eax */
+	andl $~1, %eax			/* Remove lock bit */
+	movl %eax, %ebx
+
+	/* Set SMRR base address. */
+	movl $SMRRphysBase_MSR, %ecx
+	orl $MTRR_TYPE_WRBACK, %eax
+	xorl %edx, %edx
+	wrmsr
+
+	/* Set SMRR mask. */
+	movl $SMRRphysMask_MSR, %ecx
+	movl $(~(CONFIG_SMM_TSEG_SIZE - 1) | MTRRphysMaskValid), %eax
+	xorl %edx, %edx
+	wrmsr
+
+#if CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE
+	/*
+	 * IED base is top 4M of TSEG
+	 */
+	addl $(CONFIG_SMM_TSEG_SIZE - IED_SIZE), %ebx
+	movl $(0x30000 + 0x8000 + 0x7eec), %eax
+	addr32 movl %ebx, (%eax)
+#endif
+
+skip_smrr:
+#endif
 
 	/* The next section of code is potentially southbridge specific */
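
For reference, below is a minimal C sketch of the address arithmetic the trampoline above performs: the per-core SMBASE offset of lapicid * 0x400 described in the comment block, and the SMRRphysBase/SMRRphysMask values programmed in the TSEG path. It is illustrative only and not part of the patch: the helper names and the TSEG register value used in main() are made up, while the write-back type (6) and the mask valid bit (bit 11) are the standard architectural values behind MTRR_TYPE_WRBACK and MTRRphysMaskValid.

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; only the first two come straight from the diff. */
#define ASEG_BASE		0xa0000u	/* legacy A-segment handler base */
#define SAVE_STATE_STRIDE	0x400u		/* per-CPU SMBASE offset */
#define MTRR_TYPE_WRBACK	6u		/* write-back memory type */
#define SMRR_PHYS_MASK_VALID	(1u << 11)	/* MTRRphysMaskValid */

/* Per-core SMBASE: base address minus (lapicid * 0x400), as in the trampoline. */
static uint32_t relocated_smbase(uint32_t base, uint32_t lapicid)
{
	return base - lapicid * SAVE_STATE_STRIDE;
}

/* SMRR base/mask pair covering a power-of-two sized TSEG. */
static void smrr_values(uint32_t tseg_reg, uint32_t tseg_size,
			uint32_t *base, uint32_t *mask)
{
	uint32_t tseg_base = tseg_reg & ~1u;			/* strip the lock bit */

	*base = tseg_base | MTRR_TYPE_WRBACK;			/* SMRRphysBase value */
	*mask = ~(tseg_size - 1) | SMRR_PHYS_MASK_VALID;	/* SMRRphysMask value */
}

int main(void)
{
	/* Reproduce the 0xa0000 example table from the comment block. */
	for (uint32_t lapicid = 0; lapicid < 2; lapicid++) {
		uint32_t smbase = relocated_smbase(ASEG_BASE, lapicid);

		printf("LAPICID %u: SMBASE 0x%05x  SMM Entry 0x%05x  Save State 0x%05x\n",
		       lapicid, smbase, smbase + 0x8000, smbase + 0xfd00);
	}

	/* Hypothetical 8 MiB TSEG at 0x7f800000 with the lock bit set. */
	uint32_t base, mask;
	smrr_values(0x7f800001u, 8u << 20, &base, &mask);
	printf("SMRRphysBase 0x%08x  SMRRphysMask 0x%08x\n", base, mask);

	return 0;
}

Running the sketch reproduces the LAPICID/SMBASE/SMM Entry/Save State table from the comment block, and for the hypothetical 8 MiB TSEG at 0x7f800000 it prints SMRRphysBase 0x7f800006 and SMRRphysMask 0xff800800.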