/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2010 coresystems GmbH
 * Copyright (C) 2010 Rudolf Marek
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <console/console.h>
#include <string.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/amd/model_fxx_msr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/smm.h>

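/*
 * AMD SMM configuration is done through MSRs C001_0111 (SMM base),
 * C001_0112 (SMM address) and C001_0113 (SMM mask); the handler itself
 * is placed in the legacy A-segment at 0xa0000 (ASEG SMRAM).
 */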
#define SMM_BASE_MSR 0xc0010111
#define SMM_ADDR_MSR 0xc0010112
#define SMM_MASK_MSR 0xc0010113

#define SMM_BASE 0xa0000

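/*
 * Start and size of the SMM handler blob; these symbols are generated
 * by the build when the handler binary is wrapped into an object file.
 */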
extern unsigned char _binary_smm_start;
extern unsigned char _binary_smm_size;

static int smm_handler_copied = 0;

void smm_init(void)
{
	msr_t msr;

	msr = rdmsr(HWCR_MSR);
	if (msr.lo & (1 << 0)) {
		/* SMMLOCK is already set, so the SMM configuration cannot
		 * be changed until the next reset; keep the handler that
		 * the previous boot installed.
		 */
		printk(BIOS_DEBUG, "SMM is still locked from last boot, using old handler.\n");
		return;
	}

	/* Only copy SMM handler once, not once per CPU */
	if (!smm_handler_copied) {
		msr_t syscfg_orig, mtrr_aseg_orig;

		smm_handler_copied = 1;

		/* Back up MSRs for later restore */
		syscfg_orig = rdmsr(SYSCFG_MSR);
		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);

		/* MTRR changes don't like an enabled cache */
		disable_cache();

		msr = syscfg_orig;
		/* Allow changes to MTRR extended attributes */
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		/* Turn the extended attributes off until we fix them so
		 * A0000 is routed to memory.
		 */
		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

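		/*
		 * Each byte of MTRRfix16K_A0000 describes one 16 KiB slice
		 * of 0xa0000-0xbffff; with MtrrFixDramModEn set, value 0x18
		 * per byte sets the RdMem/WrMem extended attributes so that
		 * accesses to the slice go to DRAM instead of MMIO.
		 */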
		/* set DRAM access to 0xa0000 */
		msr.lo = 0x18181818;
		msr.hi = 0x18181818;
		wrmsr(MTRRfix16K_A0000_MSR, msr);

		/* enable the extended features */
		msr = syscfg_orig;
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

		enable_cache();

		/* copy the real SMM handler */
		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
		wbinvd();

		/* Restore SYSCFG and the fixed MTRR for 0xa0000 */
		disable_cache();
		wrmsr(SYSCFG_MSR, syscfg_orig);
		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
		enable_cache();
	}

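	/*
	 * Each core gets its own SMBASE, staggered by 0x400 per LAPIC ID,
	 * so that the cores' SMM entry points and save state areas inside
	 * the shared ASEG handler do not collide.
	 */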
	/* But set SMM base address on all CPUs/cores */
	msr = rdmsr(SMM_BASE_MSR);
	msr.lo = SMM_BASE - (lapicid() * 0x400);
	wrmsr(SMM_BASE_MSR, msr);

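	/*
	 * With the ASEG range enabled, accesses to 0xa0000-0xbffff made
	 * while the core is in SMM are directed to SMRAM instead of the
	 * legacy VGA window.
	 */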
	/* enable the SMM memory window */
	msr = rdmsr(SMM_MASK_MSR);
	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
	wrmsr(SMM_MASK_MSR, msr);

	/* Set SMMLOCK to avoid exploits messing with SMM */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
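	/*
	 * Once SMMLOCK is set, the SMM base/address/mask MSRs above can no
	 * longer be modified on this core until the next reset.
	 */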
}

void smm_lock(void)
{
	/* We lock SMM per CPU core */
}