2 * This file is part of the coreboot project.
4 * Copyright (C) 2008 coresystems GmbH
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; version 2 of
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
23 * +--------------------------------+ 0xffff
24 * | Save State Map Node 0 |
25 * | Save State Map Node 1 |
26 * | Save State Map Node 2 |
27 * | Save State Map Node 3 |
29 * +--------------------------------+ 0xf000
32 * | EARLY DATA (lock, vectors) |
33 * +--------------------------------+ 0x8400
34 * | SMM Entry Node 0 (+ stack) |
35 * +--------------------------------+ 0x8000
36 * | SMM Entry Node 1 (+ stack) |
37 * | SMM Entry Node 2 (+ stack) |
38 * | SMM Entry Node 3 (+ stack) |
40 * +--------------------------------+ 0x7400
44 * +--------------------------------+ TSEG
/* Memory-mapped xAPIC ID register (xAPIC MMIO base 0xfee00000 + 0x20);
 * read later to derive a per-core stack / entry offset. */
48 #define LAPIC_ID 0xfee00020
/* Per-core SMM stack size: 1 KiB minus the 0x10 bytes reserved for the
 * entry jump (see the 0x8010 stack-base computation in the handler). */
49 #define SMM_STACK_SIZE (0x400 - 0x10)
51 /* Values for the xchg lock */
53 #define SMI_UNLOCKED 1
/* NOTE(review): the matching "locked" value is not visible in this chunk. */
/* TSEG_BAR: PCIe extended-config (PCIEXBAR) address of the northbridge's
 * TSEG register; the handler reads it to learn the physical base of the
 * TSEG SMRAM region (low bit is a lock flag and is masked off). */
56 #if CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE
57 #include <northbridge/intel/sandybridge/sandybridge.h>
58 #define TSEG_BAR (DEFAULT_PCIEXBAR | TSEG)
/* NOTE(review): the #else arm of this conditional is not visible here;
 * the #error below belongs to it. */
60 #error "Northbridge must define TSEG_BAR."
63 /* initially SMM is some sort of real mode. Let gcc know
64 * how to treat the SMM handler stub
67 .section ".handler", "a", @progbits
72 * SMM code to enable protected mode and jump to the
73 * C-written function void smi_handler(u32 smm_revision)
75 * All the bad magic is not all that bad after all.
/* NOTE(review): this chunk is a sampled listing -- the smm_handler_start
 * label and a number of instructions between the visible lines are missing
 * (the embedded original line numbers jump). Comments below are hedged
 * accordingly. */
/* Step 1: find where TSEG (and therefore this handler) lives in physical
 * memory by reading the northbridge TSEG register through PCIEXBAR.
 * %edx carries the TSEG base for the rest of the handler. */
78 movl $(TSEG_BAR), %eax /* Get TSEG base from PCIE */
79 addr32 movl (%eax), %edx /* Save TSEG_BAR in %edx */
80 andl $~1, %edx /* Remove lock bit */
/* Step 2: serialize all cores on a spinlock word (smm_lock) inside TSEG.
 * NOTE(review): the instructions seeding %ebx with the TSEG base and %ax
 * with the "locked" value are not visible in this chunk. */
84 addl $(smm_lock), %ebx
86 addr32 xchg %ax, (%ebx) /* atomic swap: old value says who holds it */
87 cmpw $SMI_UNLOCKED, %ax
89 /* Proceed if we got the lock */
90 je smm_check_prot_vector
92 /* If we did not get the lock, wait for release */
94 addr32 movw (%ebx), %ax /* spin re-read; loop branch not visible here */
99 smm_check_prot_vector:
100 /* See if we need to adjust protected vector */
/* NOTE(review): %eax is presumably re-seeded with the TSEG base just
 * before this addl -- that instruction is not visible in this chunk. */
102 addl $(smm_prot_vector), %eax
103 addr32 movl (%eax), %ebx
104 cmpl $(smm_prot_start), %ebx /* still the link-time value? */
105 jne smm_check_gdt_vector /* already relocated -> skip fix-up */
107 /* Adjust vector with TSEG offset */
/* NOTE(review): the addl of %edx (TSEG base) into %ebx is not visible. */
109 addr32 movl %ebx, (%eax)
111 smm_check_gdt_vector:
112 /* See if we need to adjust GDT vector */
114 addl $(smm_gdt_vector + 2), %eax /* +2 skips GDTR limit word -> base field */
115 addr32 movl (%eax), %ebx
116 cmpl $(smm_gdt - smm_handler_start), %ebx /* still link-time offset? */
119 /* Adjust vector with TSEG offset */
121 addr32 movl %ebx, (%eax)
/* Step 3: point %ebx at the relocated GDTR image; the lgdt and the CR0
 * read that feeds %eax below are not visible in this chunk. */
124 movl $(smm_gdt_vector), %ebx
125 addl %edx, %ebx /* TSEG base in %edx */
129 andl $0x1FFAFFD1, %eax /* CD,NW,PG,AM,WP,NE,TS,EM,MP = 0 */
130 orl $0x1, %eax /* PE = 1 */
133 /* Enable protected mode */
/* NOTE(review): the indirect far jump through smm_prot_vector that
 * completes the mode switch is not visible in this chunk. */
134 movl $(smm_prot_vector), %eax
140 /* Use flat data segment */
/* Step 4: build a per-core stack. The LAPIC ID selects a disjoint 1 KiB
 * region per core below SMM_BASE + 0x8000 (core 0 highest). */
148 /* Get this CPU's LAPIC ID */
153 /* calculate stack offset by multiplying the APIC ID
154 * by 1024 (0x400), and save that offset in ebp.
159 /* We put the stack for each core right above
160 * its SMM entry point. Core 0 starts at SMM_BASE + 0x8000,
161 * we spare 0x10 bytes for the jump to be sure.
163 movl $0x8010, %eax /* core 0 address */
164 addl %edx, %eax /* adjust for TSEG */
165 subl %ecx, %eax /* subtract offset, see above */
166 movl %eax, %ebx /* Save bottom of stack in ebx */
/* Zero the stack, SMM_STACK_SIZE/4 dwords at a time; the store loop and
 * the load of %esp are not visible in this chunk. */
171 movl $(SMM_STACK_SIZE >> 2), %ecx
176 addl $SMM_STACK_SIZE, %ebx /* %ebx -> top of this core's stack */
179 /* Get SMM revision */
/* NOTE(review): 0xfefc appears to address the revision field in core 0's
 * save state; confirm against the SMRAM save-state map. */
180 movl $0xfefc, %ebx /* core 0 address */
181 addl %edx, %ebx /* adjust for TSEG */
182 subl %ebp, %ebx /* subtract core X offset */
186 /* Call 32bit C handler */
/* NOTE(review): the push of the revision and the call to smi_handler are
 * not visible in this chunk. */
/* Step 5: release the spinlock so the next core may enter. */
190 movl $(TSEG_BAR), %eax /* Get TSEG base from PCIE */
191 movl (%eax), %ebx /* Save TSEG_BAR in %ebx */
192 andl $~1, %ebx /* Remove lock bit */
193 addl $(smm_lock), %ebx
194 movw $SMI_UNLOCKED, %ax
/* NOTE(review): the store of %ax into (%ebx) is not visible here. */
197 /* To return, just do rsm. It will "clean up" protected mode */
/* Minimal flat-model GDT used after the protected-mode switch above.
 * NOTE(review): the smm_gdt label and the first 4 descriptor bytes
 * (limit/base words) of each entry are not visible in this chunk; only
 * the high dwords (base mid/high, access, flags) appear below. */
201 /* The first GDT entry can not be used. Keep it zero */
202 .long 0x00000000, 0x00000000
204 /* gdt selector 0x08, flat code segment */
/* access 0x9b = present, ring 0, code, readable; flags 0xc = G=1, D=1 */
206 .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
208 /* gdt selector 0x10, flat data segment */
/* access 0x93 = present, ring 0, data, writable; same 4 GiB flat limit */
210 .byte 0x00, 0x93, 0xcf, 0x00
214 .section ".earlydata", "a", @progbits
/* GDTR image (limit word + base dword). The base is stored as the
 * link-time offset of smm_gdt and is patched at runtime by adding the
 * TSEG base (see smm_check_gdt_vector above).
 * NOTE(review): the smm_gdt_vector label itself, and the smm_lock /
 * smm_prot_vector early data referenced by the handler, are not visible
 * in this sampled chunk. */
232 .word smm_gdt_end - smm_gdt - 1
233 .long smm_gdt - smm_handler_start
235 .section ".jumptable", "a", @progbits
237 /* This is the SMM jump table. All cores use the same SMM handler
238 * for simplicity. But SMM Entry needs to be different due to the
239 * save state area. The jump table makes sure all CPUs jump into the
240 * real handler on SMM entry.
243 /* This code currently supports up to 16 CPU cores. If more than 16 CPU cores
244 * shall be used, below table has to be updated, as well as smm_tseg.ld
247 /* When using TSEG do a relative jump and fix up the CS later since we
248 * do not know what our TSEG base is yet.
/* 16 identical entries, one per core; each core's SMM entry point lands
 * on its own slot and funnels into the shared handler above.
 * NOTE(review): the per-entry labels/spacing directives that separate
 * these 16 jumps are not visible in this sampled chunk. */
254 jmp smm_handler_start
257 jmp smm_handler_start
260 jmp smm_handler_start
263 jmp smm_handler_start
266 jmp smm_handler_start
269 jmp smm_handler_start
272 jmp smm_handler_start
275 jmp smm_handler_start
278 jmp smm_handler_start
281 jmp smm_handler_start
284 jmp smm_handler_start
287 jmp smm_handler_start
290 jmp smm_handler_start
293 jmp smm_handler_start
296 jmp smm_handler_start
299 jmp smm_handler_start