Commit: Go back to SIPI WAIT state for those CPUs defining the newly introduced CONFIG_AP_IN_SIPI_WAIT option.
File: [coreboot.git] / src / include / cpu / x86 / lapic.h
1 #ifndef CPU_X86_LAPIC_H
2 #define CPU_X86_LAPIC_H
3
4 #include <cpu/x86/lapic_def.h>
5 #include <cpu/x86/msr.h>
6 #include <arch/hlt.h>
7
8 /* See if I need to initialize the local apic */
9 #if CONFIG_SMP || CONFIG_IOAPIC
10 #  define NEED_LAPIC 1
11 #endif
12
13 static inline __attribute__((always_inline)) unsigned long lapic_read(unsigned long reg)
14 {
15         return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg));
16 }
17
18 static inline __attribute__((always_inline)) void lapic_write(unsigned long reg, unsigned long v)
19 {
20         *((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg)) = v;
21 }
22
23 static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
24 {
25         do { } while ( lapic_read( LAPIC_ICR ) & LAPIC_ICR_BUSY );
26 }
27
28
29
30 static inline void enable_lapic(void)
31 {
32
33         msr_t msr;
34         msr = rdmsr(LAPIC_BASE_MSR);
35         msr.hi &= 0xffffff00;
36         msr.lo &= 0x000007ff;
37         msr.lo |= LAPIC_DEFAULT_BASE | (1 << 11);
38         wrmsr(LAPIC_BASE_MSR, msr);
39 }
40
41 static inline void disable_lapic(void)
42 {
43         msr_t msr;
44         msr = rdmsr(LAPIC_BASE_MSR);
45         msr.lo &= ~(1 << 11);
46         wrmsr(LAPIC_BASE_MSR, msr);
47 }
48
49 static inline __attribute__((always_inline)) unsigned long lapicid(void)
50 {
51         return lapic_read(LAPIC_ID) >> 24;
52 }
53
54
#if CONFIG_AP_IN_SIPI_WAIT != 1
/* If we need to go back to sipi wait, we use the long non-inlined version of
 * this function in lapic_cpu_init.c
 */
/* Called by an AP when it is ready to halt and wait for a new task:
 * halt forever, re-halting after any wakeup. */
static inline __attribute__((always_inline)) void stop_this_cpu(void)
{
	while (1)
		hlt();
}
#endif
68
69 #if ! defined (__ROMCC__)
70
/* Type-generic atomic exchange: store v into *ptr and return the old
 * value, dispatching on the operand size via __xchg() below.  The cast
 * restores the pointee's type on the returned value. */
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

/* Oversized dummy type used so the asm "m" constraint in __xchg()
 * is treated as covering the whole object at ptr, preventing the
 * compiler from caching it across the exchange. */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
75
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *        but generally the primitive is invalid, *ptr is output argument. --ANK
 */
/* Atomically exchange x with the 1-, 2- or 4-byte object at ptr and
 * return the object's previous value.  Any other size falls through
 * the switch and returns x unchanged (no exchange performed). */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
105
106
107 static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
108 {
109         xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg), v);
110 }
111
112
/* On "good" APICs a plain MMIO write is safe; older parts need writes
 * done with a locked xchg (read-around-write).  Test with #if, not
 * #ifdef, to match the #if CONFIG_SMP style used above: Kconfig-style
 * CONFIG_ flags are always defined (as 0 or 1), so #ifdef would pick
 * the non-atomic path even when the option is disabled.  An entirely
 * undefined symbol still evaluates to 0 and selects the safe path.
 */
#if CONFIG_X86_GOOD_APIC
# define FORCE_READ_AROUND_WRITE 0
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x,y) lapic_write((x),(y))
#else
# define FORCE_READ_AROUND_WRITE 1
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x,y) lapic_write_atomic((x),(y))
#endif
122
/* Read register `reg` of the remote CPU whose APIC ID is `apicid`,
 * using the ICR remote-read protocol: issue a remote-read IPI, poll
 * the ICR remote-read status field until it leaves the in-progress
 * state or ~1000 polls elapse, then fetch the value from the
 * remote-read register (LAPIC_RRR) into *pvalue if the status is
 * valid.
 *
 * Returns 0 on success, -1 on timeout or invalid status (in which
 * case *pvalue is left untouched).
 */
static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
{
	int timeout;
	unsigned long status;
	int result;
	lapic_wait_icr_idle();
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
	/* The remote-read command encodes the register as offset / 16. */
	lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
	timeout = 0;
	do {
#if 0
		udelay(100);
#endif
		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);

	result = -1;
	if (status == LAPIC_ICR_RR_VALID) {
		*pvalue = lapic_read(LAPIC_RRR);
		result = 0;
	}
	return result;
}
146
147
148 void setup_lapic(void);
149
150
151 #if CONFIG_SMP == 1
152 struct device;
153 int start_cpu(struct device *cpu);
154
155 #endif /* CONFIG_SMP */
156
157
158 #endif /* !__ROMCC__ */
159
160 #endif /* CPU_X86_LAPIC_H */