2 * This file is part of the coreboot project.
4 * Copyright (C) 2008-2010 coresystems GmbH
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; version 2 of the License.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 // Make sure no stage 2 code is included:
// NOTE(review): this is a partial extract -- the original file's line
// numbers are fused into the text below, and several lines (the #else /
// #endif closing the southbridge chain, and the opening #if matched by
// the CONFIG_SMM_TSEG #endif) are missing from this view.
25 /* On AMD's platforms we can set SMBASE by writing an MSR */
26 #if !CONFIG_NORTHBRIDGE_AMD_AMDK8 && !CONFIG_NORTHBRIDGE_AMD_AMDFAM10
28 // FIXME: Is this piece of code southbridge specific, or
29 // can it be cleaned up so this include is not required?
30 // It's needed right now because we get our DEFAULT_PMBASE from
// Select the southbridge header that provides DEFAULT_PMBASE; if none of
// the supported southbridges is configured, the #error below fires.
32 #if CONFIG_SOUTHBRIDGE_INTEL_I82801GX
33 #include "../../../southbridge/intel/i82801gx/i82801gx.h"
34 #elif CONFIG_SOUTHBRIDGE_INTEL_I82801DX
35 #include "../../../southbridge/intel/i82801dx/i82801dx.h"
36 #elif CONFIG_SOUTHBRIDGE_INTEL_SCH
37 #include "../../../southbridge/intel/sch/sch.h"
// NOTE(review): the #else that introduces this #error is not visible in
// this extract -- confirm against the full file.
39 #error "Southbridge needs SMM handler support."
// Provides the MTRR/SMRR MSR constants (MTRRcap_MSR, SMRRphysBase_MSR,
// SMRRphysMask_MSR, MTRR_TYPE_WRBACK, MTRRphysMaskValid) used below.
44 #include <cpu/x86/mtrr.h>
46 #endif /* CONFIG_SMM_TSEG */
// Memory-mapped local APIC ID register (default LAPIC MMIO base + 0x20).
48 #define LAPIC_ID 0xfee00020
// Exported symbols marking the copyable relocation stub boundaries.
50 .global smm_relocation_start
51 .global smm_relocation_end
53 /* initially SMM is some sort of real mode. */
57 * This trampoline code relocates SMBASE to 0xa0000 - ( lapicid * 0x400 )
59 * Why 0x400? It is a safe value to cover the save state area per CPU. On
60 * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
61 * Core 2 CPUs the _documented_ parts of the save state area is 48 bytes
62 * bigger, effectively sizing our data structures 0x300 bytes.
64 * LAPICID SMBASE SMM Entry SAVE STATE
65 * 0 0xa0000 0xa8000 0xafd00
66 * 1 0x9fc00 0xa7c00 0xaf900
67 * 2 0x9f800 0xa7800 0xaf500
68 * 3 0x9f400 0xa7400 0xaf100
69 * 4 0x9f000 0xa7000 0xaed00
70 * 5 0x9ec00 0xa6c00 0xae900
71 * 6 0x9e800 0xa6800 0xae500
72 * 7 0x9e400 0xa6400 0xae100
73 * 8 0x9e000 0xa6000 0xadd00
74 * 9 0x9dc00 0xa5c00 0xad900
75 * 10 0x9d800 0xa5800 0xad500
76 * 11 0x9d400 0xa5400 0xad100
77 * 12 0x9d000 0xa5000 0xacd00
78 * 13 0x9cc00 0xa4c00 0xac900
79 * 14 0x9c800 0xa4800 0xac500
80 * 15 0x9c400 0xa4400 0xac100
84 * 31 0x98400 0xa0400 0xa8100
86 * With 32 cores, the SMM handler would need to fit between
87 * 0xa0000-0xa0400 and the stub plus stack would need to go
88 * at 0xa8000-0xa8100 (example for core 0). That is not enough.
90 * This means we're basically limited to 16 cpu cores before
91 * we need to use the TSEG/HSEG for the actual SMM handler plus stack.
92 * When we exceed 32 cores, we also need to put SMBASE to TSEG/HSEG.
94 * If we figure out the documented values above are safe to use,
95 * we could pack the structure above even more, so we could use the
96 * scheme to pack save state areas for 63 AMD CPUs or 58 Intel CPUs
99 * Note: Some versions of Pentium M need their SMBASE aligned to 32k.
100 * On those the above only works for up to 2 cores. But for now we only
101 * care for Core (2) Duo/Solo
105 smm_relocation_start:
// NOTE(review): partial extract. The original file's line numbers are
// fused into the statements, and many lines (labels, rdmsr/wrmsr pairs,
// conditional jumps, and the out instructions for the port I/O below)
// are missing between the numbered lines. This fragment will not
// assemble as shown; added comments are hedged accordingly.
106 /* Check revision to see if AMD64 style SMM_BASE
107 * Intel Core Solo/Duo: 0x30007
108 * Intel Core2 Solo/Duo: 0x30100
109 * Intel SandyBridge: 0x30101
111 * This check does not make much sense, unless someone ports
112 * SMI handling to AMD64 CPUs.
// Read the SMM revision ID from the default save state area: default
// SMBASE is 0x30000, the save state sits near 0x38000 + 0x7exx.
115 mov $0x38000 + 0x7efc, %ebx
116 addr32 mov (%ebx), %al
// Offset 0x7ef8 is presumably the AMD64-style SMBASE slot, 0x7f00 the
// legacy Intel one; the compare/branch selecting between the two loads
// below is missing from this extract -- TODO confirm in the full file.
120 mov $0x38000 + 0x7ef8, %ebx
123 mov $0x38000 + 0x7f00, %ebx
126 /* Get this CPU's LAPIC ID */
// %esi presumably holds LAPIC_ID (0xfee00020); the instruction loading
// it is not visible here -- verify against the full file.
128 addr32 movl (%esi), %ecx
131 /* calculate offset by multiplying the
132 * apic ID by 1024 (0x400)
// TSEG path: fetch the TSEG base from PCI config space, strip the lock
// bit, subtract the per-CPU offset, and store the result into the
// save state SMBASE slot pointed to by %ebx.
138 movl $(TSEG_BAR), %ecx /* Get TSEG base from PCIE */
139 addr32 movl (%ecx), %eax /* Save TSEG_BAR in %eax */
140 andl $~1, %eax /* Remove lock bit */
144 subl %edx, %eax /* subtract offset, see above */
146 addr32 movl %eax, (%ebx)
149 /* Check for SMRR capability in MTRRCAP[11] */
150 movl $MTRRcap_MSR, %ecx
// (the rdmsr / bit-test / conditional skip is missing from this extract)
156 movl $(TSEG_BAR), %ecx /* Get TSEG base from PCIE */
157 addr32 movl (%ecx), %eax /* Save TSEG_BAR in %eax */
158 andl $~1, %eax /* Remove lock bit */
161 /* Set SMRR base address. */
162 movl $SMRRphysBase_MSR, %ecx
163 orl $MTRR_TYPE_WRBACK, %eax
// SMRR mask: size mask for CONFIG_SMM_TSEG_SIZE plus the valid bit.
// (wrmsr instructions for both SMRR MSRs are missing from this extract)
168 movl $SMRRphysMask_MSR, %ecx
169 movl $(~(CONFIG_SMM_TSEG_SIZE - 1) | MTRRphysMaskValid), %eax
173 #if CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE
175 * IED base is top 4M of TSEG
// Store the computed IED base at offset 0x7eec of the default save
// state area (0x30000 SMBASE + 0x8000 entry + 0x7eec).
177 addl $(CONFIG_SMM_TSEG_SIZE - IED_SIZE), %ebx
178 movl $(0x30000 + 0x8000 + 0x7eec), %eax
179 addr32 movl %ebx, (%eax)
185 /* The next section of code is potentially southbridge specific */
// ACPI PM I/O: each movw below loads a PM-base-relative port into %dx;
// the out instructions that actually write the ports are missing here.
187 /* Clear SMI status */
188 movw $(DEFAULT_PMBASE + 0x34), %dx
192 /* Clear PM1 status */
193 movw $(DEFAULT_PMBASE + 0x00), %dx
197 /* Set EOS bit so other SMIs can occur */
198 movw $(DEFAULT_PMBASE + 0x30), %dx
203 /* End of southbridge specific section. */
205 #if CONFIG_DEBUG_SMM_RELOCATION
206 /* print [SMM-x] so we can determine if CPUx went to SMM */
207 movw $CONFIG_TTYS0_BASE, %dx
217 /* calculate ascii of cpu number. More than 9 cores? -> FIXME */
229 /* That's it. return */