Add support to run SMM handler in TSEG instead of ASEG
src/cpu/x86/smm/smihandler.c
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2008-2009 coresystems GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#include <arch/io.h>
#include <arch/romcc_io.h>
#include <console/console.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/smm.h>

#if !CONFIG_SMM_TSEG /* TSEG handler locks in assembly */
typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;

/* SMI multiprocessing semaphore */
static volatile smi_semaphore smi_handler_status
        __attribute__ ((aligned (4))) = SMI_UNLOCKED;

static int smi_obtain_lock(void)
{
        u8 ret = SMI_LOCKED;

        asm volatile (
                "movb %2, %%al\n"
                "xchgb %%al, %1\n"
                "movb %%al, %0\n"
                : "=g" (ret), "=m" (smi_handler_status)
                : "g" (SMI_LOCKED)
                : "eax"
        );

        return (ret == SMI_UNLOCKED);
}

void smi_release_lock(void)
{
        asm volatile (
                "movb %1, %%al\n"
                "xchgb %%al, %0\n"
                : "=m" (smi_handler_status)
                : "g" (SMI_UNLOCKED)
                : "eax"
        );
}
#endif
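
/*
 * Note on the locking above: smi_obtain_lock() is an atomic test-and-set.
 * XCHG with a memory operand is implicitly locked on x86, so no LOCK
 * prefix is needed. As a rough sketch of the same idea in C11 atomics
 * (illustrative only -- <stdatomic.h> is an assumption here and is not
 * available in this SMM environment):
 *
 *   static atomic_flag smi_lock = ATOMIC_FLAG_INIT;
 *
 *   static int smi_obtain_lock(void)
 *   {
 *           // returns 1 if we acquired the lock, 0 if someone holds it
 *           return !atomic_flag_test_and_set(&smi_lock);
 *   }
 *
 *   static void smi_release_lock(void)
 *   {
 *           atomic_flag_clear(&smi_lock);
 *   }
 */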

#define LAPIC_ID 0xfee00020
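/* The ID of the CPU we are running on lives in bits 31:24 of the memory
 * mapped local APIC ID register at 0xfee00020 (see the Intel SDM);
 * nodeid() below simply shifts it down.
 */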
static inline __attribute__((always_inline)) unsigned long nodeid(void)
{
        return (*((volatile unsigned long *)(LAPIC_ID)) >> 24);
}

void io_trap_handler(int smif)
{
        /* If a handler function handles a given IO trap, it
         * must return a non-zero value
         */
        printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);

        if (southbridge_io_trap_handler(smif))
                return;

        if (mainboard_io_trap_handler(smif))
                return;

        printk(BIOS_DEBUG, "Unknown function\n");
}
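
/*
 * What such a handler looks like: a board or southbridge level
 * io_trap_handler typically switches on the SMIF value and returns
 * non-zero once it has consumed the trap. A minimal sketch -- the
 * function number 0x99 is made up for illustration and is not a real
 * coreboot SMIF value:
 *
 *   int mainboard_io_trap_handler(int smif)
 *   {
 *           switch (smif) {
 *           case 0x99: // hypothetical "enable fan control" request
 *                   // ... program the EC / SuperIO here ...
 *                   return 1; // trap handled, stop searching
 *           }
 *           return 0; // not ours, let the next handler try
 *   }
 */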

/**
 * @brief Set the EOS bit
 */
static void smi_set_eos(void)
{
        southbridge_smi_set_eos();
}
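
/*
 * Background: EOS ("End of SMI") is a bit in the southbridge's SMI
 * enable register (bit 1 of SMI_EN on Intel ICH-style chipsets). SMI#
 * generation is held off from the moment an SMI is triggered until EOS
 * is set again, so every pass through the handler must end by setting
 * it -- see the end of smi_handler() below.
 */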

static u32 pci_orig;

/**
 * @brief Back up the PCI configuration address so we do not disturb
 *        an in-flight OS access
 */
static void smi_backup_pci_address(void)
{
        pci_orig = inl(0xcf8);
}

/**
 * @brief Restore the previously backed up PCI configuration address
 */
static void smi_restore_pci_address(void)
{
        outl(pci_orig, 0xcf8);
}
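
/*
 * Why the backup/restore matters: PCI config space is reached through an
 * address/data port pair, and an SMI may arrive between the two accesses
 * the OS makes:
 *
 *   outl(0x80000000 | (bus << 16) | (dev << 11) | (fn << 8) | reg, 0xcf8);
 *   // <-- SMI# taken here: any PCI config access inside the handler
 *   //     overwrites the address latched in 0xcf8
 *   val = inl(0xcfc);
 *
 * Restoring 0xcf8 before resuming makes the interruption invisible to
 * the OS.
 */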

/**
 * @brief Interrupt handler for SMI#
 *
 * @param smm_revision revision of the SMM state save map
 */
void smi_handler(u32 smm_revision)
{
        unsigned int node;
        smm_state_save_area_t state_save;

#if !CONFIG_SMM_TSEG
        /* Are we ok to execute the handler? */
        if (!smi_obtain_lock()) {
                /* For security reasons we don't release the other CPUs
                 * until the CPU holding the lock is actually done
                 */
                while (smi_handler_status == SMI_LOCKED) {
                        asm volatile (
                                ".byte 0xf3, 0x90\n" /* PAUSE (encoded as REP NOP): hint to the CPU that we are spinning */
                        );
                }
                return;
        }
#endif
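
        /*
         * Note: SMI# is typically asserted to all CPUs at once, so every
         * CPU enters this handler. Exactly one wins the lock and does the
         * real work; the losers spin above until the winner releases the
         * lock, and only then return (and resume via RSM) back to the OS.
         */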

        smi_backup_pci_address();

        node = nodeid();

        console_init();

        printk(BIOS_SPEW, "\nSMI# #%d\n", node);

        switch (smm_revision) {
        case 0x00030002:
        case 0x00030007:
                state_save.type = LEGACY;
                state_save.legacy_state_save = (legacy_smm_state_save_area_t *)
                        (0xa8000 + 0x7e00 - (node * 0x400));
                break;
        case 0x00030100:
        case 0x00030101: /* SandyBridge */
                state_save.type = EM64T;
                state_save.em64t_state_save = (em64t_smm_state_save_area_t *)
                        (0xa8000 + 0x7d00 - (node * 0x400));
                break;
        case 0x00030064:
                state_save.type = AMD64;
                state_save.amd64_state_save = (amd64_smm_state_save_area_t *)
                        (0xa8000 + 0x7e00 - (node * 0x400));
                break;
        default:
                printk(BIOS_DEBUG, "smm_revision: 0x%08x\n", smm_revision);
                printk(BIOS_DEBUG, "SMI# not supported on your CPU\n");
                /* Don't release the lock: since we can't handle this SMI,
                 * no further SMIs should be processed either.
                 */
                return;
        }
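
        /*
         * How these constants come about (for the ASEG based handler):
         * CPU 0's SMBASE is relocated to 0xa0000 and each further CPU's
         * SMBASE is staggered 0x400 below the previous one so that the
         * per-CPU state save maps do not overlap. The save map sits near
         * the top of the 64KB segment, e.g. for the legacy map on node 1:
         *
         *   0xa8000 + 0x7e00 - (1 * 0x400) = 0xafa00
         */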

        /* Call chipset specific SMI handlers. */
        if (cpu_smi_handler)
                cpu_smi_handler(node, &state_save);
        if (northbridge_smi_handler)
                northbridge_smi_handler(node, &state_save);
        if (southbridge_smi_handler)
                southbridge_smi_handler(node, &state_save);
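
        /* The checks above rely on these per-chipset handlers being weak
         * symbols (presumably resolving to 0 when a platform leaves one
         * unimplemented); a plain extern function would never compare as
         * NULL here.
         */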

        smi_restore_pci_address();

#if !CONFIG_SMM_TSEG
        smi_release_lock();
#endif

        /* De-assert SMI# signal to allow another SMI */
        smi_set_eos();
}