X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=src%2Fnorthbridge%2Famd%2Famdk8%2Framinit_f.c;h=319293b7ed61e9f138efeb6e261d57040e8ab19e;hb=5ff7c13e858a31addf1558731a12cf6c753b576d;hp=a883fa401d54b56b0b8c1e2768af82c497b53c6e;hpb=2ee6779a64922af755a35ce70f85f2d67b488557;p=coreboot.git diff --git a/src/northbridge/amd/amdk8/raminit_f.c b/src/northbridge/amd/amdk8/raminit_f.c index a883fa401..319293b7e 100644 --- a/src/northbridge/amd/amdk8/raminit_f.c +++ b/src/northbridge/amd/amdk8/raminit_f.c @@ -20,53 +20,30 @@ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -#include #include #include #include #include #include "raminit.h" -#include "amdk8_f.h" -#include "spd_ddr2.h" - -#ifndef QRANK_DIMM_SUPPORT -#define QRANK_DIMM_SUPPORT 0 +#include "f.h" +#include +#if CONFIG_HAVE_OPTION_TABLE +#include "option_table.h" #endif -static inline void print_raminit(const char *strval, uint32_t val) -{ -#if CONFIG_USE_PRINTK_IN_CAR - printk_debug("%s%08x\r\n", strval, val); +#if CONFIG_DEBUG_RAM_SETUP +#define printk_raminit(args...) printk(BIOS_DEBUG, args) #else - print_debug(strval); print_debug_hex32(val); print_debug("\r\n"); -#endif -} - -#define RAM_TIMING_DEBUG 0 - -static inline void print_tx(const char *strval, uint32_t val) -{ -#if RAM_TIMING_DEBUG == 1 - print_raminit(strval, val); +#define printk_raminit(args...) #endif -} - - -static inline void print_t(const char *strval) -{ -#if RAM_TIMING_DEBUG == 1 - print_debug(strval); -#endif -} - -#if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0 -# error "CONFIG_LB_MEM_TOPK must be a power of 2" +#if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0 +# error "CONFIG_RAMTOP must be a power of 2" #endif -#include "amdk8_f_pci.c" +#include "f_pci.c" /* for PCI_ADDR(0, 0x18, 2, 0x98) index, @@ -96,7 +73,7 @@ static inline void print_t(const char *strval) */ -static void setup_resource_map(const unsigned int *register_values, int max) +void setup_resource_map(const unsigned int *register_values, int max) { int i; for (i = 0; i < max; i += 3) { @@ -415,7 +392,7 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in * 110 = 8 bus clocks * 111 = 9 bus clocks * [ 7: 7] Reserved - * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay, + * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay, * minium write-to-read delay when both access the same chip select) * 00 = Reserved * 01 = 1 bus clocks @@ -547,7 +524,7 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in * registered DIMM is present * [19:19] Reserved * [20:20] SlowAccessMode (Slow Access Mode (2T Mode)) - * 0 = DRAM address and control signals are driven for one + * 0 = DRAM address and control signals are driven for one * MEMCLK cycle * 1 = One additional MEMCLK of setup time is provided on all * DRAM address and control signals except CS, CKE, and ODT; @@ -713,9 +690,7 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in } sysinfo->ctrl_present[ctrl->node_id] = 1; - print_spew("setting up CPU"); - print_spew_hex8(ctrl->node_id); - print_spew(" northbridge registers\r\n"); + printk(BIOS_SPEW, "setting up CPU %02x northbridge registers\n", ctrl->node_id); max = ARRAY_SIZE(register_values); for (i = 0; i < max; i += 3) { device_t dev; @@ -728,33 +703,34 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in reg |= register_values[i+2]; pci_write_config32(dev, where, reg); } - - print_spew("done.\r\n"); + printk(BIOS_SPEW, 
"done.\n"); } - +#if 0 static int is_dual_channel(const struct mem_controller *ctrl) { uint32_t dcl; dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW); return dcl & DCL_Width128; } - +#endif static int is_opteron(const struct mem_controller *ctrl) { - /* Test to see if I am an Opteron. - * FIXME Testing dual channel capability is correct for now - * but a better test is probably required. - * m2 and s1g1 support dual channel too. but only support unbuffered dimm + /* Test to see if I am an Opteron. M2 and S1G1 support dual + * channel, too, but only support unbuffered DIMMs so we need a + * better test for Opterons. + * However, all code uses is_opteron() to find out whether to + * use dual channel, so if we really check for opteron here, we + * need to fix up all code using this function, too. */ -#warning "FIXME implement a better test for opterons" + uint32_t nbcap; nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP); return !!(nbcap & NBCAP_128Bit); } - +#if 0 static int is_registered(const struct mem_controller *ctrl) { /* Test to see if we are dealing with registered SDRAM. @@ -765,7 +741,7 @@ static int is_registered(const struct mem_controller *ctrl) dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW); return !(dcl & DCL_UnBuffDimm); } - +#endif static void spd_get_dimm_size(unsigned device, struct dimm_size *sz) { @@ -829,14 +805,14 @@ static void spd_get_dimm_size(unsigned device, struct dimm_size *sz) if (value <=4 ) value += 8; // add back to 1G to high value += (27-5); // make 128MB to the real lines if ( value != (sz->per_rank)) { - print_err("Bad RANK Size --\r\n"); + printk(BIOS_ERR, "Bad RANK Size --\n"); goto val_err; } goto out; val_err: - die("Bad SPD value\r\n"); + die("Bad SPD value\n"); /* If an hw_error occurs report that I have no memory */ hw_err: sz->per_rank = 0; @@ -850,7 +826,8 @@ static void spd_get_dimm_size(unsigned device, struct dimm_size *sz) static void set_dimm_size(const struct mem_controller *ctrl, - struct dimm_size *sz, unsigned index, struct mem_info *meminfo) + struct dimm_size *sz, unsigned index, + struct mem_info *meminfo) { uint32_t base0, base1; @@ -889,7 +866,7 @@ static void set_dimm_size(const struct mem_controller *ctrl, /* Set the appropriate DIMM base address register */ pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 0) << 2), base0); pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 1) << 2), base1); -#if QRANK_DIMM_SUPPORT == 1 +#if CONFIG_QRANK_DIMM_SUPPORT if (sz->rank == 4) { pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), base0); pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), base1); @@ -901,11 +878,11 @@ static void set_dimm_size(const struct mem_controller *ctrl, if (base0) { uint32_t dword; uint32_t ClkDis0; -#if CPU_SOCKET_TYPE == 0x10 /* L1 */ +#if CONFIG_CPU_SOCKET_TYPE == 0x10 /* L1 */ ClkDis0 = DTL_MemClkDis0; -#elif CPU_SOCKET_TYPE == 0x11 /* AM2 */ +#elif CONFIG_CPU_SOCKET_TYPE == 0x11 /* AM2 */ ClkDis0 = DTL_MemClkDis0_AM2; -#elif CPU_SOCKET_TYPE == 0x12 /* S1G1 */ +#elif CONFIG_CPU_SOCKET_TYPE == 0x12 /* S1G1 */ ClkDis0 = DTL_MemClkDis0_S1g1; #endif @@ -917,7 +894,7 @@ static void set_dimm_size(const struct mem_controller *ctrl, } else { dword = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW); //Channel A dword &= ~(ClkDis0 >> index); -#if QRANK_DIMM_SUPPORT == 1 +#if CONFIG_QRANK_DIMM_SUPPORT if (sz->rank == 4) { dword &= ~(ClkDis0 >> (index+2)); } @@ -927,7 +904,7 @@ static void set_dimm_size(const struct mem_controller *ctrl, if 
(meminfo->is_Width128) { // ChannelA+B dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC); dword &= ~(ClkDis0 >> index); -#if QRANK_DIMM_SUPPORT == 1 +#if CONFIG_QRANK_DIMM_SUPPORT if (sz->rank == 4) { dword &= ~(ClkDis0 >> (index+2)); } @@ -980,7 +957,7 @@ static void set_dimm_cs_map(const struct mem_controller *ctrl, } map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP); map &= ~(0xf << (index * 4)); -#if QRANK_DIMM_SUPPORT == 1 +#if CONFIG_QRANK_DIMM_SUPPORT if (sz->rank == 4) { map &= ~(0xf << ( (index + 2) * 4)); } @@ -991,7 +968,7 @@ static void set_dimm_cs_map(const struct mem_controller *ctrl, unsigned temp_map; temp_map = cs_map_aaa[(sz->bank-2)*3*4 + (sz->rows - 13)*3 + (sz->col - 9) ]; map |= temp_map << (index*4); -#if QRANK_DIMM_SUPPORT == 1 +#if CONFIG_QRANK_DIMM_SUPPORT if (sz->rank == 4) { map |= temp_map << ( (index + 2) * 4); } @@ -1025,14 +1002,13 @@ static long spd_set_ram_size(const struct mem_controller *ctrl, return -1; /* Report SPD error */ } set_dimm_size(ctrl, sz, i, meminfo); - set_dimm_cs_map (ctrl, sz, i, meminfo); + set_dimm_cs_map(ctrl, sz, i, meminfo); } return meminfo->dimm_mask; } - static void route_dram_accesses(const struct mem_controller *ctrl, - unsigned long base_k, unsigned long limit_k) + unsigned long base_k, unsigned long limit_k) { /* Route the addresses to the controller node */ unsigned node_id; @@ -1061,7 +1037,6 @@ static void route_dram_accesses(const struct mem_controller *ctrl, } } - static void set_top_mem(unsigned tom_k, unsigned hole_startk) { /* Error if I don't have memory */ @@ -1070,29 +1045,32 @@ static void set_top_mem(unsigned tom_k, unsigned hole_startk) } /* Report the amount of memory. */ - print_debug("RAM: 0x"); - print_debug_hex32(tom_k); - print_debug(" KB\r\n"); + printk(BIOS_DEBUG, "RAM end at 0x%08x kB\n", tom_k); + /* Now set top of memory */ msr_t msr; if (tom_k > (4*1024*1024)) { - /* Now set top of memory */ + printk_raminit("Handling memory mapped above 4 GB\n"); + printk_raminit("Upper RAM end at 0x%08x kB\n", tom_k); msr.lo = (tom_k & 0x003fffff) << 10; msr.hi = (tom_k & 0xffc00000) >> 22; wrmsr(TOP_MEM2, msr); + printk_raminit("Correcting memory amount mapped below 4 GB\n"); } /* Leave a 64M hole between TOP_MEM and TOP_MEM2 * so I can see my rom chip and other I/O devices. */ if (tom_k >= 0x003f0000) { -#if HW_MEM_HOLE_SIZEK != 0 +#if CONFIG_HW_MEM_HOLE_SIZEK != 0 if (hole_startk != 0) { tom_k = hole_startk; } else #endif tom_k = 0x3f0000; + printk_raminit("Adjusting lower RAM end\n"); } + printk_raminit("Lower RAM end at 0x%08x kB\n", tom_k); msr.lo = (tom_k & 0x003fffff) << 10; msr.hi = (tom_k & 0xffc00000) >> 22; wrmsr(TOP_MEM, msr); @@ -1129,6 +1107,15 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl, /* See if all of the memory chip selects are the same size * and if so count them. */ +#if defined(CMOS_VSTART_interleave_chip_selects) + if (read_option(interleave_chip_selects, 1) == 0) + return 0; +#else +#if !defined(CONFIG_INTERLEAVE_CHIP_SELECTS) || !CONFIG_INTERLEAVE_CHIP_SELECTS + return 0; +#endif +#endif + chip_selects = 0; common_size = 0; common_cs_mode = 0xff; @@ -1179,7 +1166,6 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl, csbase_inc <<=1; } - /* Compute the initial values for csbase and csbask. * In csbase just set the enable bit and the base to zero. * In csmask set the mask bits for the size and page level interleave. 
@@ -1202,11 +1188,12 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl, csbase += csbase_inc; } - print_debug("Interleaved\r\n"); + printk(BIOS_DEBUG, "Interleaved\n"); /* Return the memory size in K */ return common_size << ((27-10) + bits); } + static unsigned long order_chip_selects(const struct mem_controller *ctrl) { unsigned long tom; @@ -1242,7 +1229,7 @@ static unsigned long order_chip_selects(const struct mem_controller *ctrl) csbase = value; canidate = index; } - + /* See if I have found a new canidate */ if (csbase == 0) { break; @@ -1276,7 +1263,7 @@ static unsigned long order_chip_selects(const struct mem_controller *ctrl) return (tom & ~0xff000000) << (27-10); } -unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id) +static unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id) { unsigned node_id; unsigned end_k; @@ -1296,24 +1283,18 @@ unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id) return end_k; } - static void order_dimms(const struct mem_controller *ctrl, struct mem_info *meminfo) { unsigned long tom_k, base_k; - if (read_option(CMOS_VSTART_interleave_chip_selects, - CMOS_VLEN_interleave_chip_selects, 1) != 0) { - tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128); - } else { - print_debug("Interleaving disabled\r\n"); - tom_k = 0; - } - + tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128); + if (!tom_k) { + printk(BIOS_DEBUG, "Interleaving disabled\n"); tom_k = order_chip_selects(ctrl); } - + /* Compute the memory base address */ base_k = memory_end_k(ctrl, ctrl->node_id); tom_k += base_k; @@ -1321,20 +1302,17 @@ static void order_dimms(const struct mem_controller *ctrl, set_top_mem(tom_k, 0); } - static long disable_dimm(const struct mem_controller *ctrl, unsigned index, struct mem_info *meminfo) { - print_debug("disabling dimm"); - print_debug_hex8(index); - print_debug("\r\n"); + printk(BIOS_DEBUG, "disabling dimm %02x\n", index); if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? 
*/ pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), 0); pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), 0); } else { pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 0) << 2), 0); pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 1) << 2), 0); -#if QRANK_DIMM_SUPPORT == 1 +#if CONFIG_QRANK_DIMM_SUPPORT if (meminfo->sz[index].rank == 4) { pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), 0); pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), 0); @@ -1346,7 +1324,6 @@ static long disable_dimm(const struct mem_controller *ctrl, unsigned index, return meminfo->dimm_mask; } - static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, struct mem_info *meminfo) { @@ -1400,17 +1377,14 @@ static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, } pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl); -#if 1 if (meminfo->is_registered) { - print_debug("Registered\r\n"); + printk(BIOS_SPEW, "Registered\n"); } else { - print_debug("Unbuffered\r\n"); + printk(BIOS_SPEW, "Unbuffered\n"); } -#endif return meminfo->dimm_mask; } - static unsigned int spd_detect_dimms(const struct mem_controller *ctrl) { unsigned dimm_mask; @@ -1420,6 +1394,7 @@ static unsigned int spd_detect_dimms(const struct mem_controller *ctrl) int byte; unsigned device; device = ctrl->channel0[i]; + printk_raminit("DIMM socket %i, channel 0 SPD device is 0x%02x\n", i, device); if (device) { byte = spd_read_byte(ctrl->channel0[i], SPD_MEM_TYPE); /* Type */ if (byte == SPD_MEM_TYPE_SDRAM_DDR2) { @@ -1427,6 +1402,7 @@ static unsigned int spd_detect_dimms(const struct mem_controller *ctrl) } } device = ctrl->channel1[i]; + printk_raminit("DIMM socket %i, channel 1 SPD device is 0x%02x\n", i, device); if (device) { byte = spd_read_byte(ctrl->channel1[i], SPD_MEM_TYPE); if (byte == SPD_MEM_TYPE_SDRAM_DDR2) { @@ -1448,30 +1424,35 @@ static long spd_enable_2channels(const struct mem_controller *ctrl, struct mem_i 4, /* *Column addresses */ 5, /* *Number of DIMM Ranks */ 6, /* *Module Data Width*/ - 9, /* *Cycle time at highest CAS Latency CL=X */ 11, /* *DIMM Conf Type */ 13, /* *Pri SDRAM Width */ 17, /* *Logical Banks */ - 18, /* *Supported CAS Latencies */ 20, /* *DIMM Type Info */ 21, /* *SDRAM Module Attributes */ - 23, /* *Cycle time at CAS Latnecy (CLX - 1) */ - 26, /* *Cycle time at CAS Latnecy (CLX - 2) */ 27, /* *tRP Row precharge time */ 28, /* *Minimum Row Active to Row Active Delay (tRRD) */ 29, /* *tRCD RAS to CAS */ 30, /* *tRAS Activate to Precharge */ 36, /* *Write recovery time (tWR) */ 37, /* *Internal write to read command delay (tRDP) */ - 38, /* *Internal read to precharge commanfd delay (tRTP) */ - 41, /* *Extension of Byte 41 tRC and Byte 42 tRFC */ + 38, /* *Internal read to precharge command delay (tRTP) */ + 40, /* *Extension of Byte 41 tRC and Byte 42 tRFC */ 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */ 42, /* *Minimum Auto Refresh Command Time(Trfc) */ + /* The SPD addresses 18, 9, 23, 26 need special treatment like + * in spd_set_memclk. Right now they cause many false negatives. + * Keep them at the end to see other mismatches (if any). + */ + 18, /* *Supported CAS Latencies */ + 9, /* *Cycle time at highest CAS Latency CL=X */ + 23, /* *Cycle time at CAS Latency (CLX - 1) */ + 26, /* *Cycle time at CAS Latency (CLX - 2) */ }; u32 dcl, dcm; + u8 common_cl; /* S1G1 and AM2 sockets are Mod64BitMux capable. 
*/ -#if CPU_SOCKET_TYPE == 0x11 || CPU_SOCKET_TYPE == 0x12 +#if CONFIG_CPU_SOCKET_TYPE == 0x11 || CONFIG_CPU_SOCKET_TYPE == 0x12 u8 mux_cap = 1; #else u8 mux_cap = 0; @@ -1497,6 +1478,14 @@ static long spd_enable_2channels(const struct mem_controller *ctrl, struct mem_i } device0 = ctrl->channel0[i]; device1 = ctrl->channel1[i]; + /* Abort if the chips don't support a common CAS latency. */ + common_cl = spd_read_byte(device0, 18) & spd_read_byte(device1, 18); + if (!common_cl) { + printk(BIOS_DEBUG, "No common CAS latency supported\n"); + goto single_channel; + } else { + printk_raminit("Common CAS latency bitfield: 0x%02x\n", common_cl); + } for (j = 0; j < ARRAY_SIZE(addresses); j++) { unsigned addr; addr = addresses[j]; @@ -1509,11 +1498,12 @@ static long spd_enable_2channels(const struct mem_controller *ctrl, struct mem_i return -1; } if (value0 != value1) { + printk_raminit("SPD values differ between channel 0/1 for byte %i\n", addr); goto single_channel; } } } - print_spew("Enabling dual channel memory\r\n"); + printk(BIOS_SPEW, "Enabling dual channel memory\n"); dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW); dcl &= ~DCL_BurstLength32; /* 32byte mode may be preferred in platforms that include graphics controllers that generate a lot of 32-bytes system memory accesses 32byte mode is not supported when the DRAM interface is 128 bits wides, even 32byte mode is set, system still use 64 byte mode */ @@ -1532,7 +1522,7 @@ static long spd_enable_2channels(const struct mem_controller *ctrl, struct mem_i if (((meminfo->dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) { /* mux capable and single dimm in channelB */ if (mux_cap) { - printk_spew("Enable 64MuxMode & BurstLength32\n"); + printk(BIOS_SPEW, "Enable 64MuxMode & BurstLength32\n"); dcm = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC); dcm |= DCM_Mode64BitMux; pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm); @@ -1547,7 +1537,7 @@ static long spd_enable_2channels(const struct mem_controller *ctrl, struct mem_i } else { /* unmatched dual dimms ? */ /* unmatched dual dimms not supported by meminit code. Use single channelA dimm. */ meminfo->dimm_mask &= ~((1 << (DIMM_SOCKETS * 2)) - (1 << DIMM_SOCKETS)); - printk_spew("Unmatched dual dimms. Use single channelA dimm.\n"); + printk(BIOS_SPEW, "Unmatched dual dimms. Use single channelA dimm.\n"); } return meminfo->dimm_mask; } @@ -1566,7 +1556,7 @@ struct mem_param { static const struct mem_param speed[] = { { - .name = "200Mhz\r\n", + .name = "200MHz", .cycle_time = 0x500, .divisor = 200, // how many 1/40ns per clock .dch_memclk = DCH_MemClkFreq_200MHz, //0 @@ -1578,7 +1568,7 @@ struct mem_param { }, { - .name = "266Mhz\r\n", + .name = "266MHz", .cycle_time = 0x375, .divisor = 150, //???? 
 		.dch_memclk = DCH_MemClkFreq_266MHz, //1
@@ -1589,7 +1579,7 @@ struct mem_param {
 		.DcqByPassMax = 4,
 	},
 	{
-		.name	    = "333Mhz\r\n",
+		.name	    = "333MHz",
 		.cycle_time = 0x300,
 		.divisor    = 120,
 		.dch_memclk = DCH_MemClkFreq_333MHz, //2
@@ -1601,7 +1591,7 @@ struct mem_param {
 	},
 	{
-		.name	    = "400Mhz\r\n",
+		.name	    = "400MHz",
 		.cycle_time = 0x250,
 		.divisor    = 100,
 		.dch_memclk = DCH_MemClkFreq_400MHz,//3
@@ -1628,10 +1618,7 @@ static const struct mem_param *get_mem_param(unsigned min_cycle_time)
 	if (!param->cycle_time) {
 		die("min_cycle_time to low");
 	}
-	print_spew(param->name);
-#ifdef DRAM_MIN_CYCLE_TIME
-	print_debug(param->name);
-#endif
+	printk(BIOS_SPEW, "%s\n", param->name);
 	return param;
 }
@@ -1656,14 +1643,28 @@ static uint8_t get_exact_divisor(int i, uint8_t divisor)
 	/*15*/	 200, 160, 120, 100,
 	};
-	unsigned fid_cur;
-	int index;
+	int index;
 	msr_t msr;
-	msr = rdmsr(0xc0010042);
-	fid_cur = msr.lo & 0x3f;
-	index = fid_cur>>1;
+	/* Check for FID control support */
+	struct cpuid_result cpuid1;
+	cpuid1 = cpuid(0x80000007);
+	if (cpuid1.edx & 0x02) {
+		/* Use current FID */
+		unsigned fid_cur;
+		msr = rdmsr(0xc0010042);
+		fid_cur = msr.lo & 0x3f;
+
+		index = fid_cur>>1;
+	} else {
+		/* Use startup FID */
+		unsigned fid_start;
+		msr = rdmsr(0xc0010015);
+		fid_start = (msr.lo & (0x3f << 24));
+
+		index = fid_start>>25;
+	}
 	if (index>12) return divisor;
@@ -1695,6 +1696,96 @@ static unsigned convert_to_linear(unsigned value)
 	return value;
 }
+static const uint8_t latency_indicies[] = { 25, 23, 9 };
+
+static int find_optimum_spd_latency(u32 spd_device, unsigned *min_latency, unsigned *min_cycle_time)
+{
+	int new_cycle_time, new_latency;
+	int index;
+	int latencies;
+	int latency;
+
+	/* First find the supported CAS latencies
+	 * Byte 18 for DDR SDRAM is interpreted:
+	 * bit 3 == CAS Latency = 3
+	 * bit 4 == CAS Latency = 4
+	 * bit 5 == CAS Latency = 5
+	 * bit 6 == CAS Latency = 6
+	 */
+	new_cycle_time = 0x500;
+	new_latency = 6;
+
+	latencies = spd_read_byte(spd_device, SPD_CAS_LAT);
+	if (latencies <= 0)
+		return 1;
+
+	printk_raminit("\tlatencies: %08x\n", latencies);
+	/* Compute the lowest cas latency which can be expressed in this
+	 * particular SPD EEPROM. You can store at most settings for 3
+	 * contiguous CAS latencies, so by taking the highest CAS
+	 * latency marked as supported in the SPD and subtracting 2 you
+	 * get the lowest expressible CAS latency. That latency is not
+	 * necessarily supported, but a (maybe invalid) entry exists
+	 * for it.
+ */ + latency = log2(latencies) - 2; + + /* Loop through and find a fast clock with a low latency */ + for (index = 0; index < 3; index++, latency++) { + int value; + if ((latency < 3) || (latency > 6) || + (!(latencies & (1 << latency)))) { + continue; + } + value = spd_read_byte(spd_device, latency_indicies[index]); + if (value < 0) { + return -1; + } + + printk_raminit("\tindex: %08x\n", index); + printk_raminit("\t\tlatency: %08x\n", latency); + printk_raminit("\t\tvalue1: %08x\n", value); + + value = convert_to_linear(value); + + printk_raminit("\t\tvalue2: %08x\n", value); + + /* Only increase the latency if we decrease the clock */ + if (value >= *min_cycle_time ) { + if (value < new_cycle_time) { + new_cycle_time = value; + new_latency = latency; + } else if (value == new_cycle_time) { + if (new_latency > latency) { + new_latency = latency; + } + } + } + printk_raminit("\t\tnew_cycle_time: %08x\n", new_cycle_time); + printk_raminit("\t\tnew_latency: %08x\n", new_latency); + + } + + if (new_latency > 6){ + return 1; + } + + /* Does min_latency need to be increased? */ + if (new_cycle_time > *min_cycle_time) { + *min_cycle_time = new_cycle_time; + } + + /* Does min_cycle_time need to be increased? */ + if (new_latency > *min_latency) { + *min_latency = new_latency; + } + + printk_raminit("2 min_cycle_time: %08x\n", *min_cycle_time); + printk_raminit("2 min_latency: %08x\n", *min_latency); + + return 0; +} + static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, struct mem_info *meminfo) { /* Compute the minimum cycle time for these dimms */ @@ -1703,8 +1794,6 @@ static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller * int i; uint32_t value; - static const uint8_t latency_indicies[] = { 25, 23, 9 }; - static const uint16_t min_cycle_times[] = { // use full speed to compare [NBCAP_MEMCLK_NOLIMIT] = 0x250, /*2.5ns */ [NBCAP_MEMCLK_333MHZ] = 0x300, /* 3.0ns */ @@ -1716,116 +1805,71 @@ static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller * value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP); min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK]; bios_cycle_time = min_cycle_times[ - read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)]; +#ifdef CMOS_VSTART_max_mem_clock + read_option(max_mem_clock, 0) +#else +#if defined(CONFIG_MAX_MEM_CLOCK) + CONFIG_MAX_MEM_CLOCK +#else + 0 // use DDR400 as default +#endif +#endif + ]; + if (bios_cycle_time > min_cycle_time) { min_cycle_time = bios_cycle_time; } min_latency = 3; - print_tx("1 min_cycle_time:", min_cycle_time); + printk_raminit("1 min_cycle_time: %08x\n", min_cycle_time); /* Compute the least latency with the fastest clock supported * by both the memory controller and the dimms. */ for (i = 0; i < DIMM_SOCKETS; i++) { - int new_cycle_time, new_latency; - int index; - int latencies; - int latency; - u32 spd_device = ctrl->channel0[i]; + u32 spd_device; - print_tx("1.1 dimm_mask:", meminfo->dimm_mask); - if (!(meminfo->dimm_mask & (1 << i))) { - if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? 
*/ - spd_device = ctrl->channel1[i]; - } else { - continue; - } - } - - /* First find the supported CAS latencies - * Byte 18 for DDR SDRAM is interpreted: - * bit 3 == CAS Latency = 3 - * bit 4 == CAS Latency = 4 - * bit 5 == CAS Latency = 5 - * bit 6 == CAS Latency = 6 - */ - new_cycle_time = 0x500; - new_latency = 6; - - latencies = spd_read_byte(spd_device, SPD_CAS_LAT); - if (latencies <= 0) continue; + printk_raminit("1.1 dimm_mask: %08x\n", meminfo->dimm_mask); + printk_raminit("i: %08x\n",i); - print_tx("i:",i); - print_tx("\tlatencies:", latencies); - /* Compute the lowest cas latency supported */ - latency = log2(latencies) - 2; + if (meminfo->dimm_mask & (1 << i)) { + spd_device = ctrl->channel0[i]; + printk_raminit("Channel 0 settings:\n"); - /* Loop through and find a fast clock with a low latency */ - for (index = 0; index < 3; index++, latency++) { - int value; - if ((latency < 3) || (latency > 6) || - (!(latencies & (1 << latency)))) { - continue; - } - value = spd_read_byte(spd_device, latency_indicies[index]); - if (value < 0) { + switch (find_optimum_spd_latency(spd_device, &min_latency, &min_cycle_time)) { + case -1: goto hw_error; + break; + case 1: + continue; } - - print_tx("\tindex:", index); - print_tx("\t\tlatency:", latency); - print_tx("\t\tvalue1:", value); - - value = convert_to_linear(value); - - print_tx("\t\tvalue2:", value); - - /* Only increase the latency if we decreas the clock */ - if (value >= min_cycle_time ) { - if (value < new_cycle_time) { - new_cycle_time = value; - new_latency = latency; - } else if (value == new_cycle_time) { - if (new_latency > latency) { - new_latency = latency; - } - } - } - print_tx("\t\tnew_cycle_time:", new_cycle_time); - print_tx("\t\tnew_latency:", new_latency); - - } - - if (new_latency > 6){ - continue; } + if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { + spd_device = ctrl->channel1[i]; + printk_raminit("Channel 1 settings:\n"); - /* Does min_latency need to be increased? */ - if (new_cycle_time > min_cycle_time) { - min_cycle_time = new_cycle_time; - } - - /* Does min_cycle_time need to be increased? */ - if (new_latency > min_latency) { - min_latency = new_latency; + switch (find_optimum_spd_latency(spd_device, &min_latency, &min_cycle_time)) { + case -1: + goto hw_error; + break; + case 1: + continue; + } } - print_tx("2 min_cycle_time:", min_cycle_time); - print_tx("2 min_latency:", min_latency); } /* Make a second pass through the dimms and disable * any that cannot support the selected memclk and cas latency. */ - print_tx("3 min_cycle_time:", min_cycle_time); - print_tx("3 min_latency:", min_latency); + printk_raminit("3 min_cycle_time: %08x\n", min_cycle_time); + printk_raminit("3 min_latency: %08x\n", min_latency); for (i = 0; (i < DIMM_SOCKETS); i++) { int latencies; int latency; int index; - int value; + int val; u32 spd_device = ctrl->channel0[i]; if (!(meminfo->dimm_mask & (1 << i))) { @@ -1859,14 +1903,14 @@ static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller * } /* Read the min_cycle_time for this latency */ - value = spd_read_byte(spd_device, latency_indicies[index]); - if (value < 0) goto hw_error; + val = spd_read_byte(spd_device, latency_indicies[index]); + if (val < 0) goto hw_error; - value = convert_to_linear(value); + val = convert_to_linear(val); /* All is good if the selected clock speed * is what I need or slower. 
*/ - if (value <= min_cycle_time) { + if (val <= min_cycle_time) { continue; } /* Otherwise I have an error, disable the dimm */ @@ -1874,7 +1918,7 @@ static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller * meminfo->dimm_mask = disable_dimm(ctrl, i, meminfo); } - print_tx("4 min_cycle_time:", min_cycle_time); + printk_raminit("4 min_cycle_time: %08x\n", min_cycle_time); /* Now that I know the minimum cycle time lookup the memory parameters */ result.param = get_mem_param(min_cycle_time); @@ -1886,7 +1930,7 @@ static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller * value |= result.param->dch_memclk << DCH_MemClkFreq_SHIFT; pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value); - print_debug(result.param->name); + printk(BIOS_DEBUG, "%s\n", result.param->name); /* Update DRAM Timing Low with our selected cas latency */ value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW); @@ -1911,37 +1955,55 @@ static unsigned convert_to_1_4(unsigned value) valuex = fraction [value & 0x7]; return valuex; } -static int update_dimm_Trc(const struct mem_controller *ctrl, - const struct mem_param *param, - int i, long dimm_mask) + +static int get_dimm_Trc_clocks(u32 spd_device, const struct mem_param *param) { - unsigned clocks, old_clocks; - uint32_t dtl; int value; int value2; - u32 spd_device = ctrl->channel0[i]; - - if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */ - spd_device = ctrl->channel1[i]; - } - + int clocks; value = spd_read_byte(spd_device, SPD_TRC); - if (value < 0) return -1; + if (value < 0) + return -1; + printk_raminit("update_dimm_Trc: tRC (41) = %08x\n", value); value2 = spd_read_byte(spd_device, SPD_TRC -1); value <<= 2; value += convert_to_1_4(value2>>4); - value *=10; + value *= 10; + printk_raminit("update_dimm_Trc: tRC final value = %i\n", value); clocks = (value + param->divisor - 1)/param->divisor; + printk_raminit("update_dimm_Trc: clocks = %i\n", clocks); if (clocks < DTL_TRC_MIN) { + // We might want to die here instead or (at least|better) disable this bank. + printk(BIOS_NOTICE, "update_dimm_Trc: Can't refresh fast enough, " + "want %i clocks, minimum is %i clocks.\n", clocks, DTL_TRC_MIN); clocks = DTL_TRC_MIN; } + return clocks; +} + +static int update_dimm_Trc(const struct mem_controller *ctrl, + const struct mem_param *param, + int i, long dimm_mask) +{ + int clocks, old_clocks; + uint32_t dtl; + u32 spd_device = ctrl->channel0[i]; + + if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? 
 */
+		spd_device = ctrl->channel1[i];
+	}
+
+	clocks = get_dimm_Trc_clocks(spd_device, param);
+	if (clocks == -1)
+		return clocks;
 	if (clocks > DTL_TRC_MAX) {
 		return 0;
 	}
+	printk_raminit("update_dimm_Trc: clocks after adjustment = %i\n", clocks);
 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
 	old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
@@ -2011,9 +2073,10 @@ static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct me
 	if (clocks < TT_MIN) {
 		clocks = TT_MIN;
 	}
-	
+
 	if (clocks > TT_MAX) {
-		return 0;
+		printk(BIOS_INFO, "warning: SPD byte %x = %x > TT_MAX (%x), setting to TT_MAX\n", SPD_TT, value, TT_MAX);
+		clocks = TT_MAX;
 	}
 
 	dtl = pci_read_config32(ctrl->f2, TT_REG);
@@ -2029,7 +2092,6 @@ static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct me
 	return 1;
 }
 
-
 static int update_dimm_Trcd(const struct mem_controller *ctrl,
 			    const struct mem_param *param, int i, long dimm_mask)
 {
@@ -2054,30 +2116,27 @@ static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_
 	value = spd_read_byte(spd_device, SPD_TRAS);	//in 1 ns
 	if (value < 0) return -1;
-	print_tx("update_dimm_Tras: 0 value=", value);
+	printk_raminit("update_dimm_Tras: 0 value= %08x\n", value);
 
 	value <<= 2;	//convert it to in 1/4ns
 
 	value *= 10;
-	print_tx("update_dimm_Tras: 1 value=", value);
+	printk_raminit("update_dimm_Tras: 1 value= %08x\n", value);
 
 	clocks = (value + param->divisor - 1)/param->divisor;
-	print_tx("update_dimm_Tras: divisor=", param->divisor);
-	print_tx("update_dimm_Tras: clocks=", clocks);
+	printk_raminit("update_dimm_Tras: divisor= %08x\n", param->divisor);
+	printk_raminit("update_dimm_Tras: clocks= %08x\n", clocks);
 	if (clocks < DTL_TRAS_MIN) {
 		clocks = DTL_TRAS_MIN;
 	}
-
 	if (clocks > DTL_TRAS_MAX) {
 		return 0;
 	}
-
 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
 	old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
 	if (old_clocks >= clocks) { // someone did it?
return 1; } - dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT); dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT); pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl); @@ -2142,13 +2201,13 @@ static int update_dimm_Tref(const struct mem_controller *ctrl, return 1; } - static void set_4RankRDimm(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo) { -#if QRANK_DIMM_SUPPRT == 1 +#if CONFIG_QRANK_DIMM_SUPPORT int value; int i; + long dimm_mask = meminfo->dimm_mask; if (!(meminfo->is_registered)) return; @@ -2160,7 +2219,7 @@ static void set_4RankRDimm(const struct mem_controller *ctrl, continue; } - if (meminfo->sz.rank == 4) { + if (meminfo->sz[i].rank == 4) { value = 1; break; } @@ -2175,7 +2234,6 @@ static void set_4RankRDimm(const struct mem_controller *ctrl, #endif } - static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl, struct mem_info *meminfo) { @@ -2186,7 +2244,7 @@ static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl, uint32_t mask_single_rank; uint32_t mask_page_1k; int value; -#if QRANK_DIMM_SUPPORT == 1 +#if CONFIG_QRANK_DIMM_SUPPORT int rank; #endif @@ -2219,20 +2277,20 @@ static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl, value = spd_read_byte(spd_device, SPD_PRI_WIDTH); - #if QRANK_DIMM_SUPPORT == 1 + #if CONFIG_QRANK_DIMM_SUPPORT rank = meminfo->sz[i].rank; #endif if (value==4) { mask_x4 |= (1<f2, DRAM_CONFIG_LOW, dcl); } - static void set_ecc(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo) { @@ -2317,14 +2374,21 @@ static void set_ecc(const struct mem_controller *ctrl, if (nbcap & NBCAP_ECC) { dcl |= DCL_DimmEccEn; } - if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) { +#ifdef CMOS_VSTART_ECC_memory + if (read_option(ECC_memory, 1) == 0) { dcl &= ~DCL_DimmEccEn; } +#else // CMOS_VSTART_ECC_memory not defined +#if !CONFIG_ECC_MEMORY + dcl &= ~DCL_DimmEccEn; +#endif +#endif pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl); meminfo->is_ecc = 1; if (!(dcl & DCL_DimmEccEn)) { meminfo->is_ecc = 0; + printk(BIOS_DEBUG, "set_ecc: ECC disabled\n"); return; // already disabled the ECC, so don't need to read SPD any more } @@ -2333,7 +2397,7 @@ static void set_ecc(const struct mem_controller *ctrl, if (!(meminfo->dimm_mask & (1 << i))) { if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? 
*/ spd_device = ctrl->channel1[i]; - printk_debug("set_ecc spd_device: 0x%x\n", spd_device); + printk(BIOS_DEBUG, "set_ecc spd_device: 0x%x\n", spd_device); } else { continue; } @@ -2366,8 +2430,8 @@ static void set_TT(const struct mem_controller *ctrl, uint32_t reg; if ((val < TT_MIN) || (val > TT_MAX)) { - print_err(str); - die(" Unknown\r\n"); + printk(BIOS_ERR, "%s", str); + die(" Unknown\n"); } reg = pci_read_config32(ctrl->f2, TT_REG); @@ -2396,19 +2460,16 @@ static void set_Twrwr(const struct mem_controller *ctrl, const struct mem_param set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRWR_SHIFT, DTH_TWRWR_MASK,DTH_TWRWR_BASE, DTH_TWRWR_MIN, DTH_TWRWR_MAX, param->Twrwr, "Twrwr"); } - static void set_Trdrd(const struct mem_controller *ctrl, const struct mem_param *param) { set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRDRD_SHIFT, DTH_TRDRD_MASK,DTH_TRDRD_BASE, DTH_TRDRD_MIN, DTH_TRDRD_MAX, param->Trdrd, "Trdrd"); } - static void set_DcqBypassMax(const struct mem_controller *ctrl, const struct mem_param *param) { set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_DcqBypassMax_SHIFT, DCH_DcqBypassMax_MASK,DCH_DcqBypassMax_BASE, DCH_DcqBypassMax_MIN, DCH_DcqBypassMax_MAX, param->DcqByPassMax, "DcqBypassMax"); // value need to be in CMOS } - static void set_Tfaw(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo) { static const uint8_t faw_1k[] = {8, 10, 13, 14}; @@ -2426,10 +2487,8 @@ static void set_Tfaw(const struct mem_controller *ctrl, const struct mem_param * } set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_FourActWindow_SHIFT, DCH_FourActWindow_MASK, DCH_FourActWindow_BASE, DCH_FourActWindow_MIN, DCH_FourActWindow_MAX, faw, "FourActWindow"); - } - static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param) { uint32_t dch; @@ -2447,7 +2506,7 @@ static void set_max_async_latency(const struct mem_controller *ctrl, const struc pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch); } - +#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */ static void set_SlowAccessMode(const struct mem_controller *ctrl) { uint32_t dch; @@ -2458,7 +2517,7 @@ static void set_SlowAccessMode(const struct mem_controller *ctrl) pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch); } - +#endif /* DRAM_OUTPUT_DRV_COMP_CTRL 0, 0x20 @@ -2468,11 +2527,12 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info * { uint32_t dword; uint32_t dwordx; +#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */ unsigned SlowAccessMode = 0; +#endif +#if CONFIG_DIMM_SUPPORT==0x0104 /* DDR2 and REG */ long dimm_mask = meminfo->dimm_mask & 0x0f; - -#if DIMM_SUPPORT==0x0104 /* DDR2 and REG */ /* for REG DIMM */ dword = 0x00111222; dwordx = 0x002f0000; @@ -2496,7 +2556,7 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info * #endif -#if DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */ +#if CONFIG_DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */ dword = 0x00111222; dwordx = 0x002F2F00; @@ -2536,7 +2596,8 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info * } #endif -#if DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */ +#if CONFIG_DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */ + long dimm_mask = meminfo->dimm_mask & 0x0f; /* for UNBUF DIMM */ dword = 0x00111222; dwordx = 0x002f2f00; @@ -2610,15 +2671,15 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info * break; } - print_raminit("\tdimm_mask = ", 
meminfo->dimm_mask); - print_raminit("\tx4_mask = ", meminfo->x4_mask); - print_raminit("\tx16_mask = ", meminfo->x16_mask); - print_raminit("\tsingle_rank_mask = ", meminfo->single_rank_mask); - print_raminit("\tODC = ", dword); - print_raminit("\tAddr Timing= ", dwordx); + printk_raminit("\tdimm_mask = %08x\n", meminfo->dimm_mask); + printk_raminit("\tx4_mask = %08x\n", meminfo->x4_mask); + printk_raminit("\tx16_mask = %08x\n", meminfo->x16_mask); + printk_raminit("\tsingle_rank_mask = %08x\n", meminfo->single_rank_mask); + printk_raminit("\tODC = %08x\n", dword); + printk_raminit("\tAddr Timing= %08x\n", dwordx); #endif -#if (DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */ +#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */ if (SlowAccessMode) { set_SlowAccessMode(ctrl); } @@ -2649,7 +2710,7 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info * static void set_RDqsEn(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo) { -#if CPU_SOCKET_TYPE==0x10 +#if CONFIG_CPU_SOCKET_TYPE==0x10 //only need to set for reg and x8 uint32_t dch; @@ -2664,7 +2725,6 @@ static void set_RDqsEn(const struct mem_controller *ctrl, #endif } - static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param) { @@ -2677,14 +2737,12 @@ static void set_idle_cycle_limit(const struct mem_controller *ctrl, pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm); } - static void set_RdWrQByp(const struct mem_controller *ctrl, const struct mem_param *param) { set_TT(ctrl, param, DRAM_CTRL_MISC, DCM_RdWrQByp_SHIFT, DCM_RdWrQByp_MASK,0, 0, 3, 2, "RdWrQByp"); } - static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo) @@ -2697,44 +2755,44 @@ static long spd_set_dram_timing(const struct mem_controller *ctrl, !(meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) ) { continue; } - print_tx("spd_set_dram_timing dimm socket: ", i); + printk_raminit("spd_set_dram_timing dimm socket: %08x\n", i); /* DRAM Timing Low Register */ - print_t("\ttrc\r\n"); + printk_raminit("\ttrc\n"); if ((rc = update_dimm_Trc (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err; - print_t("\ttrcd\r\n"); + printk_raminit("\ttrcd\n"); if ((rc = update_dimm_Trcd(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err; - print_t("\ttrrd\r\n"); + printk_raminit("\ttrrd\n"); if ((rc = update_dimm_Trrd(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err; - print_t("\ttras\r\n"); + printk_raminit("\ttras\n"); if ((rc = update_dimm_Tras(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err; - print_t("\ttrp\r\n"); + printk_raminit("\ttrp\n"); if ((rc = update_dimm_Trp (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err; - print_t("\ttrtp\r\n"); + printk_raminit("\ttrtp\n"); if ((rc = update_dimm_Trtp(ctrl, param, i, meminfo)) <= 0) goto dimm_err; - print_t("\ttwr\r\n"); + printk_raminit("\ttwr\n"); if ((rc = update_dimm_Twr (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err; /* DRAM Timing High Register */ - print_t("\ttref\r\n"); + printk_raminit("\ttref\n"); if ((rc = update_dimm_Tref(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err; - print_t("\ttwtr\r\n"); + printk_raminit("\ttwtr\n"); if ((rc = update_dimm_Twtr(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err; - print_t("\ttrfc\r\n"); + printk_raminit("\ttrfc\n"); if ((rc = update_dimm_Trfc(ctrl, param, i, meminfo)) <= 0) goto dimm_err; /* DRAM 
Config Low */ continue; dimm_err: - printk_debug("spd_set_dram_timing dimm_err!\n"); + printk(BIOS_DEBUG, "spd_set_dram_timing dimm_err!\n"); if (rc < 0) { return -1; } @@ -2784,37 +2842,37 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl, #endif meminfo = &sysinfo->meminfo[ctrl->node_id]; - print_debug_addr("sdram_set_spd_registers: paramx :", ¶mx); + printk(BIOS_DEBUG, "sdram_set_spd_registers: paramx :%p\n", ¶mx); activate_spd_rom(ctrl); meminfo->dimm_mask = spd_detect_dimms(ctrl); - print_tx("sdram_set_spd_registers: dimm_mask=0x%x\n", meminfo->dimm_mask); + printk_raminit("sdram_set_spd_registers: dimm_mask=0x%x\n", meminfo->dimm_mask); if (!(meminfo->dimm_mask & ((1 << 2*DIMM_SOCKETS) - 1))) { - print_debug("No memory for this cpu\r\n"); + printk(BIOS_DEBUG, "No memory for this cpu\n"); return; } meminfo->dimm_mask = spd_enable_2channels(ctrl, meminfo); - print_tx("spd_enable_2channels: dimm_mask=0x%x\n", meminfo->dimm_mask); + printk_raminit("spd_enable_2channels: dimm_mask=0x%x\n", meminfo->dimm_mask); if (meminfo->dimm_mask == -1) goto hw_spd_err; meminfo->dimm_mask = spd_set_ram_size(ctrl, meminfo); - print_tx("spd_set_ram_size: dimm_mask=0x%x\n", meminfo->dimm_mask); + printk_raminit("spd_set_ram_size: dimm_mask=0x%x\n", meminfo->dimm_mask); if (meminfo->dimm_mask == -1) goto hw_spd_err; meminfo->dimm_mask = spd_handle_unbuffered_dimms(ctrl, meminfo); - print_tx("spd_handle_unbuffered_dimms: dimm_mask=0x%x\n", meminfo->dimm_mask); + printk_raminit("spd_handle_unbuffered_dimms: dimm_mask=0x%x\n", meminfo->dimm_mask); if (meminfo->dimm_mask == -1) goto hw_spd_err; result = spd_set_memclk(ctrl, meminfo); param = result.param; meminfo->dimm_mask = result.dimm_mask; - print_tx("spd_set_memclk: dimm_mask=0x%x\n", meminfo->dimm_mask); + printk_raminit("spd_set_memclk: dimm_mask=0x%x\n", meminfo->dimm_mask); if (meminfo->dimm_mask == -1) goto hw_spd_err; @@ -2826,7 +2884,7 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl, paramx.divisor = get_exact_divisor(param->dch_memclk, paramx.divisor); meminfo->dimm_mask = spd_set_dram_timing(ctrl, ¶mx, meminfo); - print_tx("spd_set_dram_timing: dimm_mask=0x%x\n", meminfo->dimm_mask); + printk_raminit("spd_set_dram_timing: dimm_mask=0x%x\n", meminfo->dimm_mask); if (meminfo->dimm_mask == -1) goto hw_spd_err; @@ -2843,7 +2901,7 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl, #include "raminit_f_dqs.c" -#if HW_MEM_HOLE_SIZEK != 0 +#if CONFIG_HW_MEM_HOLE_SIZEK != 0 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i) { int ii; @@ -2904,9 +2962,10 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl) uint32_t hole_startk; int i; - hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK; + hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK; -#if HW_MEM_HOLE_SIZE_AUTO_INC == 1 + printk_raminit("Handling memory hole at 0x%08x (default)\n", hole_startk); +#if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1 /* We need to double check if the hole_startk is valid, if it is equal to basek, we need to decrease it some */ uint32_t basek_pri; @@ -2926,6 +2985,7 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl) } basek_pri = base_k; } + printk_raminit("Handling memory hole at 0x%08x (adjusted)\n", hole_startk); #endif /* find node index that need do set hole */ for (i=0; i < controllers; i++) { @@ -2949,30 +3009,37 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller 
*ctrl) } #endif - +#if CONFIG_HAVE_ACPI_RESUME == 1 +#include "exit_from_self.c" +#endif static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo) { int i; +#if CONFIG_HAVE_ACPI_RESUME == 1 + int suspend = acpi_is_wakeup_early(); +#else + int suspend = 0; +#endif #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1 unsigned cpu_f0_f1[8]; /* FIXME: How about 32 node machine later? */ tsc_t tsc, tsc0[8]; - print_debug_addr("sdram_enable: tsc0[8]: ", &tsc0[0]); -#endif + printk(BIOS_DEBUG, "sdram_enable: tsc0[8]: %p", &tsc0[0]); uint32_t dword; +#endif /* Error if I don't have memory */ if (memory_end_k(ctrl, controllers) == 0) { - die("No memory\r\n"); + die("No memory\n"); } /* Before enabling memory start the memory clocks */ for (i = 0; i < controllers; i++) { - uint32_t dtl, dch; + uint32_t dch; if (!sysinfo->ctrl_present[ i ]) continue; dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH); @@ -2990,16 +3057,16 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl, } } - /* We need to wait a mimmium of 20 MEMCLKS to enable the InitDram */ + /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */ memreset(controllers, ctrl); -#if 0 - print_debug("prepare to InitDram:"); - for (i=0; i<10; i++) { - print_debug_hex32(i); - print_debug("\b\b\b\b\b\b\b\b"); + + /* lets override the rest of the routine */ + if (suspend) { + printk(BIOS_DEBUG, "Wakeup!\n"); + exit_from_self(controllers, ctrl, sysinfo); + printk(BIOS_DEBUG, "Mem running !\n"); + return; } - print_debug("\r\n"); -#endif for (i = 0; i < controllers; i++) { uint32_t dcl, dch; @@ -3015,7 +3082,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl, dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW); if (dcl & DCL_DimmEccEn) { uint32_t mnc; - print_spew("ECC enabled\r\n"); + printk(BIOS_SPEW, "ECC enabled\n"); mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG); mnc |= MNC_ECC_EN; if (dcl & DCL_Width128) { @@ -3038,36 +3105,29 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl, } #endif -#if 0 - /* Set the DqsRcvEnTrain bit */ - dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL); - dword |= DC_DqsRcvEnTrain; - pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword); -#endif - pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl); dcl |= DCL_InitDram; pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl); } for (i = 0; i < controllers; i++) { - uint32_t dcl, dch, dcm; + uint32_t dcl, dcm; if (!sysinfo->ctrl_present[ i ]) continue; /* Skip everything if I don't have any memory on this controller */ if (sysinfo->meminfo[i].dimm_mask==0x00) continue; - print_debug("Initializing memory: "); + printk(BIOS_DEBUG, "Initializing memory: "); int loops = 0; do { dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW); loops++; if ((loops & 1023) == 0) { - print_debug("."); + printk(BIOS_DEBUG, "."); } } while(((dcl & DCL_InitDram) != 0) && (loops < TIMEOUT_LOOPS)); if (loops >= TIMEOUT_LOOPS) { - print_debug(" failed\r\n"); + printk(BIOS_DEBUG, " failed\n"); continue; } @@ -3080,7 +3140,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl, if (cpu_f0_f1[i]) { tsc= rdtsc(); - print_debug_dqs_tsc("\r\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2); + print_debug_dqs_tsc("\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2); print_debug_dqs_tsc("end tsc ", i, tsc.hi, tsc.lo, 2); if (tsc.lotom_k, sysinfo->tom2_k); #endif @@ -3150,26 +3210,25 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl, 
dqs_timing(i, &ctrl[i], sysinfo, 1); -#if MEM_TRAIN_SEQ == 1 +#if CONFIG_MEM_TRAIN_SEQ == 1 break; // only train the first node with ram #endif } -#if MEM_TRAIN_SEQ == 2 +#if CONFIG_MEM_TRAIN_SEQ == 2 clear_mtrr_dqs(sysinfo->tom2_k); #endif #endif -#if MEM_TRAIN_SEQ != 1 +#if CONFIG_MEM_TRAIN_SEQ != 1 wait_all_core0_mem_trained(sysinfo); #endif } - -static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, - const uint16_t *spd_addr) +void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, + const uint16_t *spd_addr) { int i; int j;