2 2005.12 yhlu handle coreboot_ram crossing the VGA font buffer
3 2005.12 yhlu add CONFIG_RAMBASE above 1M support for SMP
4 2008.05 stepan add support for going back to sipi wait state
7 #include <cpu/x86/lapic.h>
10 #include <console/console.h>
12 #include <device/device.h>
13 #include <device/path.h>
14 #include <smp/atomic.h>
15 #include <smp/spinlock.h>
20 #if CONFIG_RAMBASE >= 0x100000
21 /* This is a lot more paranoid now, since Linux can NOT handle
22 * being told there is a CPU when none exists. So any errors
23 * will return 0, meaning no CPU.
25 * We actually handle that case by noting which cpus start up
26 * and not telling anyone about the ones that don't.
28 static unsigned long get_valid_start_eip(unsigned long orig_start_eip)
30 return (unsigned long)orig_start_eip & 0xffff; // 16 bit to avoid 0xa0000
34 #if CONFIG_HAVE_ACPI_RESUME == 1
36 char *lowmem_backup_ptr;
37 int lowmem_backup_size;
40 static void copy_secondary_start_to_1m_below(void)
42 #if CONFIG_RAMBASE >= 0x100000
43 extern char _secondary_start[];
44 extern char _secondary_start_end[];
45 unsigned long code_size;
46 unsigned long start_eip;
48 /* _secondary_start need to be masked 20 above bit, because 16 bit code in secondary.S
49 Also We need to copy the _secondary_start to the below 1M region
51 start_eip = get_valid_start_eip((unsigned long)_secondary_start);
52 code_size = (unsigned long)_secondary_start_end - (unsigned long)_secondary_start;
54 #if CONFIG_HAVE_ACPI_RESUME == 1
55 /* need to save it for RAM resume */
56 lowmem_backup_size = code_size;
57 lowmem_backup = malloc(code_size);
58 lowmem_backup_ptr = (char *)start_eip;
60 if (lowmem_backup == NULL)
61 die("Out of backup memory\n");
63 memcpy(lowmem_backup, lowmem_backup_ptr, lowmem_backup_size);
65 /* copy the _secondary_start to the ram below 1M*/
66 memcpy((unsigned char *)start_eip, (unsigned char *)_secondary_start, code_size);
68 printk_debug("start_eip=0x%08lx, offset=0x%08lx, code_size=0x%08lx\n", start_eip, ((unsigned long)_secondary_start - start_eip), code_size);
72 static int lapic_start_cpu(unsigned long apicid)
75 unsigned long send_status, accept_status, start_eip;
76 int j, num_starts, maxlvt;
77 extern char _secondary_start[];
80 * Starting actual IPI sequence...
83 printk_spew("Asserting INIT.\n");
86 * Turn INIT on target chip
88 lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
94 lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_INT_ASSERT
97 printk_spew("Waiting for send to finish...\n");
102 send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
103 } while (send_status && (timeout++ < 1000));
104 if (timeout >= 1000) {
105 printk_err("CPU %ld: First apic write timed out. Disabling\n",
108 printk_err("ESR is 0x%lx\n", lapic_read(LAPIC_ESR));
109 if (lapic_read(LAPIC_ESR)) {
110 printk_err("Try to reset ESR\n");
111 lapic_write_around(LAPIC_ESR, 0);
112 printk_err("ESR is 0x%lx\n", lapic_read(LAPIC_ESR));
118 printk_spew("Deasserting INIT.\n");
121 lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
124 lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT);
126 printk_spew("Waiting for send to finish...\n");
131 send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
132 } while (send_status && (timeout++ < 1000));
133 if (timeout >= 1000) {
134 printk_err("CPU %ld: Second apic write timed out. Disabling\n",
140 #if CONFIG_RAMBASE >= 0x100000
141 start_eip = get_valid_start_eip((unsigned long)_secondary_start);
143 start_eip = (unsigned long)_secondary_start;
149 * Run STARTUP IPI loop.
151 printk_spew("#startup loops: %d.\n", num_starts);
155 for (j = 1; j <= num_starts; j++) {
156 printk_spew("Sending STARTUP #%d to %lu.\n", j, apicid);
157 lapic_read_around(LAPIC_SPIV);
158 lapic_write(LAPIC_ESR, 0);
159 lapic_read(LAPIC_ESR);
160 printk_spew("After apic_write.\n");
167 lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
169 /* Boot on the stack */
170 /* Kick the second */
171 lapic_write_around(LAPIC_ICR, LAPIC_DM_STARTUP
172 | (start_eip >> 12));
175 * Give the other CPU some time to accept the IPI.
179 printk_spew("Startup point 1.\n");
181 printk_spew("Waiting for send to finish...\n");
186 send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
187 } while (send_status && (timeout++ < 1000));
190 * Give the other CPU some time to accept the IPI.
194 * Due to the Pentium erratum 3AP.
197 lapic_read_around(LAPIC_SPIV);
198 lapic_write(LAPIC_ESR, 0);
200 accept_status = (lapic_read(LAPIC_ESR) & 0xEF);
201 if (send_status || accept_status)
204 printk_spew("After Startup.\n");
206 printk_warning("APIC never delivered???\n");
208 printk_warning("APIC delivery error (%lx).\n", accept_status);
209 if (send_status || accept_status)
214 /* Number of cpus that are currently running in coreboot */
215 static atomic_t active_cpus = ATOMIC_INIT(1);
217 /* start_cpu_lock covers last_cpu_index and secondary_stack.
218 * Only starting one cpu at a time lets me remove the logic
219 * for selecting the stack from assembly language.
221 * In addition, communicating by variables to the cpu I
222 * am starting allows me to verify it has started before
226 static spinlock_t start_cpu_lock = SPIN_LOCK_UNLOCKED;
227 static unsigned last_cpu_index = 0;
228 volatile unsigned long secondary_stack;
230 int start_cpu(device_t cpu)
232 extern unsigned char _estack[];
233 struct cpu_info *info;
234 unsigned long stack_end;
235 unsigned long apicid;
240 spin_lock(&start_cpu_lock);
242 /* Get the cpu's apicid */
243 apicid = cpu->path.apic.apic_id;
245 /* Get an index for the new processor */
246 index = ++last_cpu_index;
248 /* Find end of the new processors stack */
249 #if (CONFIG_LB_MEM_TOPK>1024) && (CONFIG_RAMBASE < 0x100000) && ((CONFIG_CONSOLE_VGA==1) || (CONFIG_PCI_ROM_RUN == 1))
250 if(index<1) { // only keep bsp on low
251 stack_end = ((unsigned long)_estack) - (CONFIG_STACK_SIZE*index) - sizeof(struct cpu_info);
253 // for all APs, let use stack after pgtbl, 20480 is the pgtbl size for every cpu
254 stack_end = 0x100000+(20480 + CONFIG_STACK_SIZE)*CONFIG_MAX_CPUS - (CONFIG_STACK_SIZE*index);
255 #if (0x100000+(20480 + CONFIG_STACK_SIZE)*CONFIG_MAX_CPUS) > (CONFIG_LB_MEM_TOPK<<10)
256 #warning "We may need to increase CONFIG_LB_MEM_TOPK, it need to be more than (0x100000+(20480 + CONFIG_STACK_SIZE)*CONFIG_MAX_CPUS)\n"
258 if(stack_end > (CONFIG_LB_MEM_TOPK<<10)) {
259 printk_debug("start_cpu: Please increase the CONFIG_LB_MEM_TOPK more than %luK\n", stack_end>>10);
260 die("Can not go on\n");
262 stack_end -= sizeof(struct cpu_info);
265 stack_end = ((unsigned long)_estack) - (CONFIG_STACK_SIZE*index) - sizeof(struct cpu_info);
269 /* Record the index and which cpu structure we are using */
270 info = (struct cpu_info *)stack_end;
274 /* Advertise the new stack to start_cpu */
275 secondary_stack = stack_end;
277 /* Until the cpu starts up report the cpu is not enabled */
279 cpu->initialized = 0;
282 result = lapic_start_cpu(apicid);
286 /* Wait 1s or until the new the new cpu calls in */
287 for(count = 0; count < 100000 ; count++) {
288 if (secondary_stack == 0) {
296 spin_unlock(&start_cpu_lock);
300 #if CONFIG_AP_IN_SIPI_WAIT == 1
302 * Normally this function is defined in lapic.h as an always inline function
303 * that just keeps the CPU in a hlt() loop. This does not work on all CPUs.
304 * I think all hyperthreading CPUs might need this version, but I could only
305 * verify this on the Intel Core Duo
307 void stop_this_cpu(void)
310 unsigned long send_status;
313 id = lapic_read(LAPIC_ID) >> 24;
315 printk_debug("CPU %ld going down...\n", id);
317 /* send an LAPIC INIT to myself */
318 lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id));
319 lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_INT_ASSERT | LAPIC_DM_INIT);
321 /* wait for the ipi send to finish */
323 // When these two printk_spew calls are not removed, the
324 // machine will hang when log level is SPEW. Why?
325 printk_spew("Waiting for send to finish...\n");
333 send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
334 } while (send_status && (timeout++ < 1000));
335 if (timeout >= 1000) {
336 printk_err("timed out\n");
340 printk_spew("Deasserting INIT.\n");
341 /* Deassert the LAPIC INIT */
342 lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id));
343 lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT);
345 printk_spew("Waiting for send to finish...\n");
350 send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
351 } while (send_status && (timeout++ < 1000));
352 if (timeout >= 1000) {
353 printk_err("timed out\n");
362 /* C entry point of secondary cpus */
363 void secondary_cpu_init(void)
365 atomic_inc(&active_cpus);
366 #if CONFIG_SERIAL_CPU_INIT == 1
367 #if CONFIG_MAX_CPUS>2
368 spin_lock(&start_cpu_lock);
372 #if CONFIG_SERIAL_CPU_INIT == 1
373 #if CONFIG_MAX_CPUS>2
374 spin_unlock(&start_cpu_lock);
378 atomic_dec(&active_cpus);
383 static void start_other_cpus(struct bus *cpu_bus, device_t bsp_cpu)
386 /* Loop through the cpus once getting them started */
388 for(cpu = cpu_bus->children; cpu ; cpu = cpu->sibling) {
389 if (cpu->path.type != DEVICE_PATH_APIC) {
392 #if CONFIG_SERIAL_CPU_INIT == 0
402 if (cpu->initialized) {
406 if (!start_cpu(cpu)) {
407 /* Record the error in cpu? */
408 printk_err("CPU 0x%02x would not start!\n",
409 cpu->path.apic.apic_id);
411 #if CONFIG_SERIAL_CPU_INIT == 1
412 #if CONFIG_MAX_CPUS>2
420 static void wait_other_cpus_stop(struct bus *cpu_bus)
423 int old_active_count, active_count;
424 /* Now loop until the other cpus have finished initializing */
425 old_active_count = 1;
426 active_count = atomic_read(&active_cpus);
427 while(active_count > 1) {
428 if (active_count != old_active_count) {
429 printk_info("Waiting for %d CPUS to stop\n", active_count - 1);
430 old_active_count = active_count;
433 active_count = atomic_read(&active_cpus);
435 for(cpu = cpu_bus->children; cpu; cpu = cpu->sibling) {
436 if (cpu->path.type != DEVICE_PATH_APIC) {
439 if (!cpu->initialized) {
440 printk_err("CPU 0x%02x did not initialize!\n",
441 cpu->path.apic.apic_id);
444 printk_debug("All AP CPUs stopped\n");
447 #else /* CONFIG_SMP */
448 #define initialize_other_cpus(root) do {} while(0)
449 #endif /* CONFIG_SMP */
451 #if CONFIG_WAIT_BEFORE_CPUS_INIT==0
452 #define cpus_ready_for_init() do {} while(0)
454 void cpus_ready_for_init(void);
457 #if CONFIG_HAVE_SMI_HANDLER
461 void initialize_cpus(struct bus *cpu_bus)
463 struct device_path cpu_path;
464 struct cpu_info *info;
466 /* Find the info struct for this cpu */
470 /* Ensure the local apic is enabled */
473 /* Get the device path of the boot cpu */
474 cpu_path.type = DEVICE_PATH_APIC;
475 cpu_path.apic.apic_id = lapicid();
477 /* Get the device path of the boot cpu */
478 cpu_path.type = DEVICE_PATH_CPU;
482 /* Find the device structure for the boot cpu */
483 info->cpu = alloc_find_dev(cpu_bus, &cpu_path);
486 copy_secondary_start_to_1m_below(); // why here? In case some day we can start core1 in amd_sibling_init
489 #if CONFIG_HAVE_SMI_HANDLER
493 cpus_ready_for_init();
496 #if CONFIG_SERIAL_CPU_INIT == 0
497 /* start all aps at first, so we can init ECC all together */
498 start_other_cpus(cpu_bus, info->cpu);
502 /* Initialize the bootstrap processor */
506 #if CONFIG_SERIAL_CPU_INIT == 1
507 start_other_cpus(cpu_bus, info->cpu);
510 /* Now wait the rest of the cpus stop*/
511 wait_other_cpus_stop(cpu_bus);