X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=src%2Fcpu%2Fx86%2Flapic%2Flapic_cpu_init.c;h=fc22ea4adf9a9e7fd5b8996475f485b518a283b6;hb=5ff7c13e858a31addf1558731a12cf6c753b576d;hp=20615e61c82eab8cdc0d3b5c0fc9477ed1bd9bb1;hpb=a335402bec6511c11ab54c943287e7be7fb1672d;p=coreboot.git diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c index 20615e61c..fc22ea4ad 100644 --- a/src/cpu/x86/lapic/lapic_cpu_init.c +++ b/src/cpu/x86/lapic/lapic_cpu_init.c @@ -1,3 +1,9 @@ +/* + 2005.12 yhlu add coreboot_ram cross the vga font buffer handling + 2005.12 yhlu add CONFIG_RAMBASE above 1M support for SMP + 2008.05 stepan add support for going back to sipi wait state +*/ + #include #include #include @@ -9,27 +15,67 @@ #include #include - #if CONFIG_SMP == 1 /* This is a lot more paranoid now, since Linux can NOT handle - * being told there is a CPU when none exists. So any errors - * will return 0, meaning no CPU. + * being told there is a CPU when none exists. So any errors + * will return 0, meaning no CPU. * * We actually handling that case by noting which cpus startup * and not telling anyone about the ones that dont. - */ + */ +static unsigned long get_valid_start_eip(unsigned long orig_start_eip) +{ + return (unsigned long)orig_start_eip & 0xffff; // 16 bit to avoid 0xa0000 +} + +#if CONFIG_HAVE_ACPI_RESUME == 1 +char *lowmem_backup; +char *lowmem_backup_ptr; +int lowmem_backup_size; +#endif + +extern char _secondary_start[]; + +static void copy_secondary_start_to_1m_below(void) +{ + extern char _secondary_start_end[]; + unsigned long code_size; + unsigned long start_eip; + + /* _secondary_start need to be masked 20 above bit, because 16 bit code in secondary.S + Also We need to copy the _secondary_start to the below 1M region + */ + start_eip = get_valid_start_eip((unsigned long)_secondary_start); + code_size = (unsigned long)_secondary_start_end - (unsigned long)_secondary_start; + +#if CONFIG_HAVE_ACPI_RESUME == 1 + /* need to save it for RAM resume */ + lowmem_backup_size = code_size; + lowmem_backup = malloc(code_size); + lowmem_backup_ptr = (char *)start_eip; + + if (lowmem_backup == NULL) + die("Out of backup memory\n"); + + memcpy(lowmem_backup, lowmem_backup_ptr, lowmem_backup_size); +#endif + /* copy the _secondary_start to the ram below 1M*/ + memcpy((unsigned char *)start_eip, (unsigned char *)_secondary_start, code_size); + + printk(BIOS_DEBUG, "start_eip=0x%08lx, offset=0x%08lx, code_size=0x%08lx\n", start_eip, ((unsigned long)_secondary_start - start_eip), code_size); +} + static int lapic_start_cpu(unsigned long apicid) { int timeout; unsigned long send_status, accept_status, start_eip; int j, num_starts, maxlvt; - extern char _secondary_start[]; - + /* * Starting actual IPI sequence... */ - printk_spew("Asserting INIT.\n"); + printk(BIOS_SPEW, "Asserting INIT.\n"); /* * Turn INIT on target chip @@ -39,71 +85,76 @@ static int lapic_start_cpu(unsigned long apicid) /* * Send IPI */ - + lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_INT_ASSERT | LAPIC_DM_INIT); - printk_spew("Waiting for send to finish...\n"); + printk(BIOS_SPEW, "Waiting for send to finish...\n"); timeout = 0; do { - printk_spew("+"); + printk(BIOS_SPEW, "+"); udelay(100); send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY; } while (send_status && (timeout++ < 1000)); if (timeout >= 1000) { - printk_err("CPU %d: First apic write timed out. Disabling\n", + printk(BIOS_ERR, "CPU %ld: First apic write timed out. Disabling\n", apicid); - // too bad. 
- printk_err("ESR is 0x%x\n", lapic_read(LAPIC_ESR)); + // too bad. + printk(BIOS_ERR, "ESR is 0x%lx\n", lapic_read(LAPIC_ESR)); if (lapic_read(LAPIC_ESR)) { - printk_err("Try to reset ESR\n"); + printk(BIOS_ERR, "Try to reset ESR\n"); lapic_write_around(LAPIC_ESR, 0); - printk_err("ESR is 0x%x\n", lapic_read(LAPIC_ESR)); + printk(BIOS_ERR, "ESR is 0x%lx\n", lapic_read(LAPIC_ESR)); } return 0; } +#if !defined (CONFIG_CPU_AMD_MODEL_10XXX) && !defined (CONFIG_CPU_AMD_MODEL_14XXX) mdelay(10); +#endif - printk_spew("Deasserting INIT.\n"); + printk(BIOS_SPEW, "Deasserting INIT.\n"); /* Target chip */ lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid)); /* Send IPI */ lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT); - - printk_spew("Waiting for send to finish...\n"); + + printk(BIOS_SPEW, "Waiting for send to finish...\n"); timeout = 0; do { - printk_spew("+"); + printk(BIOS_SPEW, "+"); udelay(100); send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY; } while (send_status && (timeout++ < 1000)); if (timeout >= 1000) { - printk_err("CPU %d: Second apic write timed out. Disabling\n", + printk(BIOS_ERR, "CPU %ld: Second apic write timed out. Disabling\n", apicid); - // too bad. + // too bad. return 0; } - start_eip = (unsigned long)_secondary_start; - printk_spew("start_eip=0x%08lx\n", start_eip); - + start_eip = get_valid_start_eip((unsigned long)_secondary_start); + +#if !defined (CONFIG_CPU_AMD_MODEL_10XXX) && !defined (CONFIG_CPU_AMD_MODEL_14XXX) num_starts = 2; +#else + num_starts = 1; +#endif /* * Run STARTUP IPI loop. */ - printk_spew("#startup loops: %d.\n", num_starts); + printk(BIOS_SPEW, "#startup loops: %d.\n", num_starts); maxlvt = 4; for (j = 1; j <= num_starts; j++) { - printk_spew("Sending STARTUP #%d to %u.\n", j, apicid); + printk(BIOS_SPEW, "Sending STARTUP #%d to %lu.\n", j, apicid); lapic_read_around(LAPIC_SPIV); lapic_write(LAPIC_ESR, 0); lapic_read(LAPIC_ESR); - printk_spew("After apic_write.\n"); + printk(BIOS_SPEW, "After apic_write.\n"); /* * STARTUP IPI @@ -122,12 +173,12 @@ static int lapic_start_cpu(unsigned long apicid) */ udelay(300); - printk_spew("Startup point 1.\n"); + printk(BIOS_SPEW, "Startup point 1.\n"); - printk_spew("Waiting for send to finish...\n"); + printk(BIOS_SPEW, "Waiting for send to finish...\n"); timeout = 0; do { - printk_spew("+"); + printk(BIOS_SPEW, "+"); udelay(100); send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY; } while (send_status && (timeout++ < 1000)); @@ -147,17 +198,17 @@ static int lapic_start_cpu(unsigned long apicid) if (send_status || accept_status) break; } - printk_spew("After Startup.\n"); + printk(BIOS_SPEW, "After Startup.\n"); if (send_status) - printk_warning("APIC never delivered???\n"); + printk(BIOS_WARNING, "APIC never delivered???\n"); if (accept_status) - printk_warning("APIC delivery error (%lx).\n", accept_status); + printk(BIOS_WARNING, "APIC delivery error (%lx).\n", accept_status); if (send_status || accept_status) return 0; return 1; } -/* Number of cpus that are currently running in linuxbios */ +/* Number of cpus that are currently running in coreboot */ static atomic_t active_cpus = ATOMIC_INIT(1); /* start_cpu_lock covers last_cpu_index and secondary_stack. 
@@ -186,14 +237,14 @@ int start_cpu(device_t cpu) spin_lock(&start_cpu_lock); /* Get the cpu's apicid */ - apicid = cpu->path.u.apic.apic_id; + apicid = cpu->path.apic.apic_id; /* Get an index for the new processor */ index = ++last_cpu_index; - + /* Find end of the new processors stack */ - stack_end = ((unsigned long)_estack) - (STACK_SIZE*index) - sizeof(struct cpu_info); - + stack_end = ((unsigned long)_estack) - (CONFIG_STACK_SIZE*index) - sizeof(struct cpu_info); + /* Record the index and which cpu structure we are using */ info = (struct cpu_info *)stack_end; info->index = index; @@ -211,7 +262,7 @@ int start_cpu(device_t cpu) if (result) { result = 0; - /* Wait 1s or until the new the new cpu calls in */ + /* Wait 1s or until the new cpu calls in */ for(count = 0; count < 100000 ; count++) { if (secondary_stack == 0) { result = 1; @@ -225,52 +276,167 @@ int start_cpu(device_t cpu) return result; } +#if CONFIG_AP_IN_SIPI_WAIT == 1 +/** + * Normally this function is defined in lapic.h as an always inline function + * that just keeps the CPU in a hlt() loop. This does not work on all CPUs. + * I think all hyperthreading CPUs might need this version, but I could only + * verify this on the Intel Core Duo + */ +void stop_this_cpu(void) +{ + int timeout; + unsigned long send_status; + unsigned long id; + + id = lapic_read(LAPIC_ID) >> 24; + + printk(BIOS_DEBUG, "CPU %ld going down...\n", id); + + /* send an LAPIC INIT to myself */ + lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id)); + lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_INT_ASSERT | LAPIC_DM_INIT); + + /* wait for the ipi send to finish */ +#if 0 + // When these two printk(BIOS_SPEW, ...) calls are not removed, the + // machine will hang when log level is SPEW. Why? + printk(BIOS_SPEW, "Waiting for send to finish...\n"); +#endif + timeout = 0; + do { +#if 0 + printk(BIOS_SPEW, "+"); +#endif + udelay(100); + send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY; + } while (send_status && (timeout++ < 1000)); + if (timeout >= 1000) { + printk(BIOS_ERR, "timed out\n"); + } + mdelay(10); + + printk(BIOS_SPEW, "Deasserting INIT.\n"); + /* Deassert the LAPIC INIT */ + lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id)); + lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT); + + printk(BIOS_SPEW, "Waiting for send to finish...\n"); + timeout = 0; + do { + printk(BIOS_SPEW, "+"); + udelay(100); + send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY; + } while (send_status && (timeout++ < 1000)); + if (timeout >= 1000) { + printk(BIOS_ERR, "timed out\n"); + } + + while(1) { + hlt(); + } +} +#endif + +#ifdef __SSE3__ +static __inline__ __attribute__((always_inline)) unsigned long readcr4(void) +{ + unsigned long value; + __asm__ __volatile__ ( + "mov %%cr4, %[value]" + : [value] "=a" (value)); + return value; +} + +static __inline__ __attribute__((always_inline)) void writecr4(unsigned long Data) +{ + __asm__ __volatile__ ( + "mov %%eax, %%cr4" + : + : "a" (Data) + ); +} +#endif + /* C entry point of secondary cpus */ void secondary_cpu_init(void) { atomic_inc(&active_cpus); -#if CONFIG_MAX_CPUS>2 +#if CONFIG_SERIAL_CPU_INIT == 1 + #if CONFIG_MAX_CPUS>2 spin_lock(&start_cpu_lock); + #endif +#endif + +#ifdef __SSE3__ + /* + * Seems that CR4 was cleared when AP start via lapic_start_cpu() + * Turn on CR4.OSFXSR and CR4.OSXMMEXCPT when SSE options enabled + */ + u32 cr4_val; + cr4_val = readcr4(); + cr4_val |= (1 << 9 | 1 << 10); + writecr4(cr4_val); #endif cpu_initialize(); -#if CONFIG_MAX_CPUS>2 +#if 
CONFIG_SERIAL_CPU_INIT == 1 + #if CONFIG_MAX_CPUS>2 spin_unlock(&start_cpu_lock); + #endif #endif + atomic_dec(&active_cpus); + stop_this_cpu(); } -static void initialize_other_cpus(struct bus *cpu_bus) +static void start_other_cpus(struct bus *cpu_bus, device_t bsp_cpu) { - int old_active_count, active_count; device_t cpu; /* Loop through the cpus once getting them started */ + for(cpu = cpu_bus->children; cpu ; cpu = cpu->sibling) { if (cpu->path.type != DEVICE_PATH_APIC) { continue; } + #if CONFIG_SERIAL_CPU_INIT == 0 + if(cpu==bsp_cpu) { + continue; + } + #endif + if (!cpu->enabled) { continue; } + if (cpu->initialized) { continue; } + if (!start_cpu(cpu)) { /* Record the error in cpu? */ - printk_err("CPU %u would not start!\n", - cpu->path.u.apic.apic_id); + printk(BIOS_ERR, "CPU 0x%02x would not start!\n", + cpu->path.apic.apic_id); } -#if CONFIG_MAX_CPUS>2 +#if CONFIG_SERIAL_CPU_INIT == 1 + #if CONFIG_MAX_CPUS>2 udelay(10); + #endif #endif } +} + +static void wait_other_cpus_stop(struct bus *cpu_bus) +{ + device_t cpu; + int old_active_count, active_count; /* Now loop until the other cpus have finished initializing */ old_active_count = 1; active_count = atomic_read(&active_cpus); while(active_count > 1) { if (active_count != old_active_count) { - printk_info("Waiting for %d CPUS to stop\n", active_count - 1); + printk(BIOS_INFO, "Waiting for %d CPUS to stop\n", active_count - 1); old_active_count = active_count; } udelay(10); @@ -281,12 +447,11 @@ static void initialize_other_cpus(struct bus *cpu_bus) continue; } if (!cpu->initialized) { - printk_err("CPU %u did not initialize!\n", - cpu->path.u.apic.apic_id); -#warning "FIXME do I need a mainboard_cpu_fixup function?" + printk(BIOS_ERR, "CPU 0x%02x did not initialize!\n", + cpu->path.apic.apic_id); } } - printk_debug("All AP CPUs stopped\n"); + printk(BIOS_DEBUG, "All AP CPUs stopped\n"); } #else /* CONFIG_SMP */ @@ -307,20 +472,43 @@ void initialize_cpus(struct bus *cpu_bus) /* Get the device path of the boot cpu */ cpu_path.type = DEVICE_PATH_APIC; - cpu_path.u.apic.apic_id = lapicid(); + cpu_path.apic.apic_id = lapicid(); #else /* Get the device path of the boot cpu */ cpu_path.type = DEVICE_PATH_CPU; - cpu_path.u.cpu.id = 0; + cpu_path.cpu.id = 0; #endif - + /* Find the device structure for the boot cpu */ info->cpu = alloc_find_dev(cpu_bus, &cpu_path); - + +#if CONFIG_SMP == 1 + copy_secondary_start_to_1m_below(); // why here? In case some day we can start core1 in amd_sibling_init +#endif + +#if CONFIG_HAVE_SMI_HANDLER + smm_init(); +#endif + + cpus_ready_for_init(); + +#if CONFIG_SMP == 1 + #if CONFIG_SERIAL_CPU_INIT == 0 + /* start all aps at first, so we can init ECC all together */ + start_other_cpus(cpu_bus, info->cpu); + #endif +#endif + /* Initialize the bootstrap processor */ cpu_initialize(); - /* Now initialize the rest of the cpus */ - initialize_other_cpus(cpu_bus); +#if CONFIG_SMP == 1 + #if CONFIG_SERIAL_CPU_INIT == 1 + start_other_cpus(cpu_bus, info->cpu); + #endif + + /* Now wait the rest of the cpus stop*/ + wait_other_cpus_stop(cpu_bus); +#endif }
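
Illustrative note (not part of the patch): the __SSE3__ block added in secondary_cpu_init() above exists because an AP comes out of the INIT/SIPI startup sequence with CR4 cleared, so the SSE enable bits set on the BSP are lost. The sketch below is a minimal, hypothetical restatement of that step, assuming a ring-0, GCC-style firmware build; the sketch_* names and the CR4_* macros are invented for illustration and are not coreboot identifiers. It sets the same two bits the patch ORs in as (1 << 9 | 1 << 10), i.e. CR4.OSFXSR and CR4.OSXMMEXCPT, but lets the compiler choose the register instead of hard-coding %eax as the patch's writecr4() does.

/* Minimal sketch, not from the patch: re-enable SSE support on an AP
 * whose CR4 was cleared by the INIT/SIPI startup sequence. */

#define CR4_OSFXSR     (1UL << 9)   /* OS supports FXSAVE/FXRSTOR (SSE state) */
#define CR4_OSXMMEXCPT (1UL << 10)  /* OS handles unmasked SIMD FP exceptions (#XM) */

static inline unsigned long sketch_read_cr4(void)
{
	unsigned long value;
	__asm__ __volatile__("mov %%cr4, %0" : "=r" (value));
	return value;
}

static inline void sketch_write_cr4(unsigned long value)
{
	/* Generic register constraint; no need to force %eax. */
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (value));
}

static void sketch_ap_enable_sse(void)
{
	unsigned long cr4 = sketch_read_cr4();

	cr4 |= CR4_OSFXSR | CR4_OSXMMEXCPT;	/* same bits as (1 << 9 | 1 << 10) in the patch */
	sketch_write_cr4(cr4);
}

A call to sketch_ap_enable_sse() would sit in the AP's C entry point, after the AP has a stack but before any SSE-using code runs, which is the same placement the patch chooses inside secondary_cpu_init() before cpu_initialize().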