* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <cpu/x86/mem.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/tsc.h>
#include <stdlib.h>
#include "raminit.h"
-#include "amdk8_f.h"
-#include "spd_ddr2.h"
-
-#ifndef QRANK_DIMM_SUPPORT
-#define QRANK_DIMM_SUPPORT 0
+#include "f.h"
+#include <spd_ddr2.h>
+#if CONFIG_HAVE_OPTION_TABLE
+#include "option_table.h"
#endif
-static inline void print_raminit(const char *strval, uint32_t val)
-{
-#if CONFIG_USE_PRINTK_IN_CAR
- printk_debug("%s%08x\r\n", strval, val);
+#if CONFIG_DEBUG_RAM_SETUP
+#define printk_raminit(args...) printk(BIOS_DEBUG, args)
#else
- print_debug(strval); print_debug_hex32(val); print_debug("\r\n");
-#endif
-}
-
-#define RAM_TIMING_DEBUG 0
-
-static inline void print_tx(const char *strval, uint32_t val)
-{
-#if RAM_TIMING_DEBUG == 1
- print_raminit(strval, val);
+#define printk_raminit(args...)
#endif
-}
-
-
-static inline void print_t(const char *strval)
-{
-#if RAM_TIMING_DEBUG == 1
- print_debug(strval);
-#endif
-}
-
-#if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
-# error "CONFIG_LB_MEM_TOPK must be a power of 2"
+#if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0
+# error "CONFIG_RAMTOP must be a power of 2"
#endif
-#include "amdk8_f_pci.c"
+#include "f_pci.c"
/* for PCI_ADDR(0, 0x18, 2, 0x98) index,
*/
-static void setup_resource_map(const unsigned int *register_values, int max)
+void setup_resource_map(const unsigned int *register_values, int max)
{
int i;
for (i = 0; i < max; i += 3) {
* 110 = 8 bus clocks
* 111 = 9 bus clocks
* [ 7: 7] Reserved
- * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay,
+ * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay,
* minium write-to-read delay when both access the same chip select)
* 00 = Reserved
* 01 = 1 bus clocks
* registered DIMM is present
* [19:19] Reserved
* [20:20] SlowAccessMode (Slow Access Mode (2T Mode))
- * 0 = DRAM address and control signals are driven for one
+ * 0 = DRAM address and control signals are driven for one
* MEMCLK cycle
* 1 = One additional MEMCLK of setup time is provided on all
* DRAM address and control signals except CS, CKE, and ODT;
}
sysinfo->ctrl_present[ctrl->node_id] = 1;
- print_spew("setting up CPU");
- print_spew_hex8(ctrl->node_id);
- print_spew(" northbridge registers\r\n");
+ printk(BIOS_SPEW, "setting up CPU %02x northbridge registers\n", ctrl->node_id);
max = ARRAY_SIZE(register_values);
for (i = 0; i < max; i += 3) {
device_t dev;
reg |= register_values[i+2];
pci_write_config32(dev, where, reg);
}
-
- print_spew("done.\r\n");
+ printk(BIOS_SPEW, "done.\n");
}
-
+#if 0
static int is_dual_channel(const struct mem_controller *ctrl)
{
uint32_t dcl;
dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
return dcl & DCL_Width128;
}
-
+#endif
static int is_opteron(const struct mem_controller *ctrl)
{
- /* Test to see if I am an Opteron.
- * FIXME Testing dual channel capability is correct for now
- * but a better test is probably required.
- * m2 and s1g1 support dual channel too. but only support unbuffered dimm
+ /* Test to see if I am an Opteron. M2 and S1G1 support dual
+ * channel, too, but only support unbuffered DIMMs so we need a
+ * better test for Opterons.
+ * However, all code uses is_opteron() to find out whether to
+ * use dual channel, so if we really check for opteron here, we
+ * need to fix up all code using this function, too.
*/
-#warning "FIXME implement a better test for opterons"
+
uint32_t nbcap;
nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
return !!(nbcap & NBCAP_128Bit);
}
-
+#if 0
static int is_registered(const struct mem_controller *ctrl)
{
/* Test to see if we are dealing with registered SDRAM.
dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
return !(dcl & DCL_UnBuffDimm);
}
-
+#endif
static void spd_get_dimm_size(unsigned device, struct dimm_size *sz)
{
if (value <=4 ) value += 8; // add back to 1G to high
value += (27-5); // make 128MB to the real lines
if ( value != (sz->per_rank)) {
- print_err("Bad RANK Size --\r\n");
+ printk(BIOS_ERR, "Bad RANK Size --\n");
goto val_err;
}
goto out;
val_err:
- die("Bad SPD value\r\n");
+ die("Bad SPD value\n");
/* If an hw_error occurs report that I have no memory */
hw_err:
sz->per_rank = 0;
static void set_dimm_size(const struct mem_controller *ctrl,
- struct dimm_size *sz, unsigned index, struct mem_info *meminfo)
+ struct dimm_size *sz, unsigned index,
+ struct mem_info *meminfo)
{
uint32_t base0, base1;
/* Set the appropriate DIMM base address register */
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 0) << 2), base0);
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 1) << 2), base1);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz->rank == 4) {
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), base0);
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), base1);
if (base0) {
uint32_t dword;
uint32_t ClkDis0;
-#if CPU_SOCKET_TYPE == 0x10 /* L1 */
+#if CONFIG_CPU_SOCKET_TYPE == 0x10 /* L1 */
ClkDis0 = DTL_MemClkDis0;
-#elif CPU_SOCKET_TYPE == 0x11 /* AM2 */
+#elif CONFIG_CPU_SOCKET_TYPE == 0x11 /* AM2 */
ClkDis0 = DTL_MemClkDis0_AM2;
-#elif CPU_SOCKET_TYPE == 0x12 /* S1G1 */
+#elif CONFIG_CPU_SOCKET_TYPE == 0x12 /* S1G1 */
ClkDis0 = DTL_MemClkDis0_S1g1;
#endif
} else {
dword = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW); //Channel A
dword &= ~(ClkDis0 >> index);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz->rank == 4) {
dword &= ~(ClkDis0 >> (index+2));
}
if (meminfo->is_Width128) { // ChannelA+B
dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
dword &= ~(ClkDis0 >> index);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz->rank == 4) {
dword &= ~(ClkDis0 >> (index+2));
}
}
map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
map &= ~(0xf << (index * 4));
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz->rank == 4) {
map &= ~(0xf << ( (index + 2) * 4));
}
unsigned temp_map;
temp_map = cs_map_aaa[(sz->bank-2)*3*4 + (sz->rows - 13)*3 + (sz->col - 9) ];
map |= temp_map << (index*4);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz->rank == 4) {
map |= temp_map << ( (index + 2) * 4);
}
return -1; /* Report SPD error */
}
set_dimm_size(ctrl, sz, i, meminfo);
- set_dimm_cs_map (ctrl, sz, i, meminfo);
+ set_dimm_cs_map(ctrl, sz, i, meminfo);
}
return meminfo->dimm_mask;
}
-
static void route_dram_accesses(const struct mem_controller *ctrl,
- unsigned long base_k, unsigned long limit_k)
+ unsigned long base_k, unsigned long limit_k)
{
/* Route the addresses to the controller node */
unsigned node_id;
}
}
-
static void set_top_mem(unsigned tom_k, unsigned hole_startk)
{
/* Error if I don't have memory */
}
/* Report the amount of memory. */
- print_debug("RAM: 0x");
- print_debug_hex32(tom_k);
- print_debug(" KB\r\n");
+ printk(BIOS_DEBUG, "RAM end at 0x%08x kB\n", tom_k);
+ /* Now set top of memory */
msr_t msr;
if (tom_k > (4*1024*1024)) {
- /* Now set top of memory */
+ printk_raminit("Handling memory mapped above 4 GB\n");
+ printk_raminit("Upper RAM end at 0x%08x kB\n", tom_k);
msr.lo = (tom_k & 0x003fffff) << 10;
msr.hi = (tom_k & 0xffc00000) >> 22;
wrmsr(TOP_MEM2, msr);
+ printk_raminit("Correcting memory amount mapped below 4 GB\n");
}
/* Leave a 64M hole between TOP_MEM and TOP_MEM2
* so I can see my rom chip and other I/O devices.
*/
if (tom_k >= 0x003f0000) {
-#if HW_MEM_HOLE_SIZEK != 0
+#if CONFIG_HW_MEM_HOLE_SIZEK != 0
if (hole_startk != 0) {
tom_k = hole_startk;
} else
#endif
tom_k = 0x3f0000;
+ printk_raminit("Adjusting lower RAM end\n");
}
+ printk_raminit("Lower RAM end at 0x%08x kB\n", tom_k);
msr.lo = (tom_k & 0x003fffff) << 10;
msr.hi = (tom_k & 0xffc00000) >> 22;
wrmsr(TOP_MEM, msr);
/* See if all of the memory chip selects are the same size
* and if so count them.
*/
+#if defined(CMOS_VSTART_interleave_chip_selects)
+ if (read_option(interleave_chip_selects, 1) == 0)
+ return 0;
+#else
+#if !defined(CONFIG_INTERLEAVE_CHIP_SELECTS) || !CONFIG_INTERLEAVE_CHIP_SELECTS
+ return 0;
+#endif
+#endif
+
chip_selects = 0;
common_size = 0;
common_cs_mode = 0xff;
csbase_inc <<=1;
}
-
/* Compute the initial values for csbase and csbask.
* In csbase just set the enable bit and the base to zero.
* In csmask set the mask bits for the size and page level interleave.
csbase += csbase_inc;
}
- print_debug("Interleaved\r\n");
+ printk(BIOS_DEBUG, "Interleaved\n");
/* Return the memory size in K */
return common_size << ((27-10) + bits);
}
+
static unsigned long order_chip_selects(const struct mem_controller *ctrl)
{
unsigned long tom;
csbase = value;
canidate = index;
}
-
+
/* See if I have found a new canidate */
if (csbase == 0) {
break;
return (tom & ~0xff000000) << (27-10);
}
-unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
+static unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
{
unsigned node_id;
unsigned end_k;
return end_k;
}
-
static void order_dimms(const struct mem_controller *ctrl,
struct mem_info *meminfo)
{
unsigned long tom_k, base_k;
- if (read_option(CMOS_VSTART_interleave_chip_selects,
- CMOS_VLEN_interleave_chip_selects, 1) != 0) {
- tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128);
- } else {
- print_debug("Interleaving disabled\r\n");
- tom_k = 0;
- }
-
+ tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128);
+
if (!tom_k) {
+ printk(BIOS_DEBUG, "Interleaving disabled\n");
tom_k = order_chip_selects(ctrl);
}
-
+
/* Compute the memory base address */
base_k = memory_end_k(ctrl, ctrl->node_id);
tom_k += base_k;
set_top_mem(tom_k, 0);
}
-
static long disable_dimm(const struct mem_controller *ctrl, unsigned index,
struct mem_info *meminfo)
{
- print_debug("disabling dimm");
- print_debug_hex8(index);
- print_debug("\r\n");
+ printk(BIOS_DEBUG, "disabling dimm %02x\n", index);
if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), 0);
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), 0);
} else {
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 0) << 2), 0);
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 1) << 2), 0);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (meminfo->sz[index].rank == 4) {
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), 0);
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), 0);
return meminfo->dimm_mask;
}
-
static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
struct mem_info *meminfo)
{
}
pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
-#if 1
if (meminfo->is_registered) {
- print_debug("Registered\r\n");
+ printk(BIOS_SPEW, "Registered\n");
} else {
- print_debug("Unbuffered\r\n");
+ printk(BIOS_SPEW, "Unbuffered\n");
}
-#endif
return meminfo->dimm_mask;
}
-
static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
{
unsigned dimm_mask;
int byte;
unsigned device;
device = ctrl->channel0[i];
+ printk_raminit("DIMM socket %i, channel 0 SPD device is 0x%02x\n", i, device);
if (device) {
byte = spd_read_byte(ctrl->channel0[i], SPD_MEM_TYPE); /* Type */
if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
}
}
device = ctrl->channel1[i];
+ printk_raminit("DIMM socket %i, channel 1 SPD device is 0x%02x\n", i, device);
if (device) {
byte = spd_read_byte(ctrl->channel1[i], SPD_MEM_TYPE);
if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
4, /* *Column addresses */
5, /* *Number of DIMM Ranks */
6, /* *Module Data Width*/
- 9, /* *Cycle time at highest CAS Latency CL=X */
11, /* *DIMM Conf Type */
13, /* *Pri SDRAM Width */
17, /* *Logical Banks */
- 18, /* *Supported CAS Latencies */
20, /* *DIMM Type Info */
21, /* *SDRAM Module Attributes */
- 23, /* *Cycle time at CAS Latnecy (CLX - 1) */
- 26, /* *Cycle time at CAS Latnecy (CLX - 2) */
27, /* *tRP Row precharge time */
28, /* *Minimum Row Active to Row Active Delay (tRRD) */
29, /* *tRCD RAS to CAS */
30, /* *tRAS Activate to Precharge */
36, /* *Write recovery time (tWR) */
37, /* *Internal write to read command delay (tRDP) */
- 38, /* *Internal read to precharge commanfd delay (tRTP) */
- 41, /* *Extension of Byte 41 tRC and Byte 42 tRFC */
+ 38, /* *Internal read to precharge command delay (tRTP) */
+ 40, /* *Extension of Byte 41 tRC and Byte 42 tRFC */
41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
42, /* *Minimum Auto Refresh Command Time(Trfc) */
+ /* The SPD addresses 18, 9, 23, 26 need special treatment like
+ * in spd_set_memclk. Right now they cause many false negatives.
+ * Keep them at the end to see other mismatches (if any).
+ */
+ 18, /* *Supported CAS Latencies */
+ 9, /* *Cycle time at highest CAS Latency CL=X */
+ 23, /* *Cycle time at CAS Latency (CLX - 1) */
+ 26, /* *Cycle time at CAS Latency (CLX - 2) */
};
u32 dcl, dcm;
+ u8 common_cl;
/* S1G1 and AM2 sockets are Mod64BitMux capable. */
-#if CPU_SOCKET_TYPE == 0x11 || CPU_SOCKET_TYPE == 0x12
+#if CONFIG_CPU_SOCKET_TYPE == 0x11 || CONFIG_CPU_SOCKET_TYPE == 0x12
u8 mux_cap = 1;
#else
u8 mux_cap = 0;
}
device0 = ctrl->channel0[i];
device1 = ctrl->channel1[i];
+ /* Abort if the chips don't support a common CAS latency. */
+ common_cl = spd_read_byte(device0, 18) & spd_read_byte(device1, 18);
+ if (!common_cl) {
+ printk(BIOS_DEBUG, "No common CAS latency supported\n");
+ goto single_channel;
+ } else {
+ printk_raminit("Common CAS latency bitfield: 0x%02x\n", common_cl);
+ }
for (j = 0; j < ARRAY_SIZE(addresses); j++) {
unsigned addr;
addr = addresses[j];
return -1;
}
if (value0 != value1) {
+ printk_raminit("SPD values differ between channel 0/1 for byte %i\n", addr);
goto single_channel;
}
}
}
- print_spew("Enabling dual channel memory\r\n");
+ printk(BIOS_SPEW, "Enabling dual channel memory\n");
dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
dcl &= ~DCL_BurstLength32; /* 32byte mode may be preferred in platforms that include graphics controllers that generate a lot of 32-bytes system memory accesses
32byte mode is not supported when the DRAM interface is 128 bits wides, even 32byte mode is set, system still use 64 byte mode */
if (((meminfo->dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
/* mux capable and single dimm in channelB */
if (mux_cap) {
- printk_spew("Enable 64MuxMode & BurstLength32\n");
+ printk(BIOS_SPEW, "Enable 64MuxMode & BurstLength32\n");
dcm = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
dcm |= DCM_Mode64BitMux;
pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
} else { /* unmatched dual dimms ? */
/* unmatched dual dimms not supported by meminit code. Use single channelA dimm. */
meminfo->dimm_mask &= ~((1 << (DIMM_SOCKETS * 2)) - (1 << DIMM_SOCKETS));
- printk_spew("Unmatched dual dimms. Use single channelA dimm.\n");
+ printk(BIOS_SPEW, "Unmatched dual dimms. Use single channelA dimm.\n");
}
return meminfo->dimm_mask;
}
static const struct mem_param speed[] = {
{
- .name = "200Mhz\r\n",
+ .name = "200MHz",
.cycle_time = 0x500,
.divisor = 200, // how many 1/40ns per clock
.dch_memclk = DCH_MemClkFreq_200MHz, //0
},
{
- .name = "266Mhz\r\n",
+ .name = "266MHz",
.cycle_time = 0x375,
.divisor = 150, //????
.dch_memclk = DCH_MemClkFreq_266MHz, //1
.DcqByPassMax = 4,
},
{
- .name = "333Mhz\r\n",
+ .name = "333MHz",
.cycle_time = 0x300,
.divisor = 120,
.dch_memclk = DCH_MemClkFreq_333MHz, //2
},
{
- .name = "400Mhz\r\n",
+ .name = "400MHz",
.cycle_time = 0x250,
.divisor = 100,
.dch_memclk = DCH_MemClkFreq_400MHz,//3
if (!param->cycle_time) {
die("min_cycle_time to low");
}
- print_spew(param->name);
-#ifdef DRAM_MIN_CYCLE_TIME
- print_debug(param->name);
-#endif
+ printk(BIOS_SPEW, "%s\n", param->name);
return param;
}
/*15*/ 200, 160, 120, 100,
};
- unsigned fid_cur;
- int index;
+ int index;
msr_t msr;
- msr = rdmsr(0xc0010042);
- fid_cur = msr.lo & 0x3f;
- index = fid_cur>>1;
+ /* Check for FID control support */
+ struct cpuid_result cpuid1;
+ cpuid1 = cpuid(0x80000007);
+ if( cpuid1.edx & 0x02 ) {
+ /* Use current FID */
+ unsigned fid_cur;
+ msr = rdmsr(0xc0010042);
+ fid_cur = msr.lo & 0x3f;
+
+ index = fid_cur>>1;
+ } else {
+ /* Use startup FID */
+ unsigned fid_start;
+ msr = rdmsr(0xc0010015);
+ fid_start = (msr.lo & (0x3f << 24));
+
+ index = fid_start>>25;
+ }
if (index>12) return divisor;
return value;
}
+static const uint8_t latency_indicies[] = { 25, 23, 9 };
+
+static int find_optimum_spd_latency(u32 spd_device, unsigned *min_latency, unsigned *min_cycle_time)
+{
+ int new_cycle_time, new_latency;
+ int index;
+ int latencies;
+ int latency;
+
+ /* First find the supported CAS latencies
+ * Byte 18 for DDR SDRAM is interpreted:
+ * bit 3 == CAS Latency = 3
+ * bit 4 == CAS Latency = 4
+ * bit 5 == CAS Latency = 5
+ * bit 6 == CAS Latency = 6
+ */
+ new_cycle_time = 0x500;
+ new_latency = 6;
+
+ latencies = spd_read_byte(spd_device, SPD_CAS_LAT);
+ if (latencies <= 0)
+ return 1;
+
+ printk_raminit("\tlatencies: %08x\n", latencies);
+ /* Compute the lowest cas latency which can be expressed in this
+ * particular SPD EEPROM. You can store at most settings for 3
+ * contiguous CAS latencies, so by taking the highest CAS
+ * latency marked as supported in the SPD and subtracting 2 you
+ * get the lowest expressible CAS latency. That latency is not
+ * necessarily supported, but a (maybe invalid) entry exists
+ * for it.
+ */
+ latency = log2(latencies) - 2;
+
+ /* Loop through and find a fast clock with a low latency */
+ for (index = 0; index < 3; index++, latency++) {
+ int value;
+ if ((latency < 3) || (latency > 6) ||
+ (!(latencies & (1 << latency)))) {
+ continue;
+ }
+ value = spd_read_byte(spd_device, latency_indicies[index]);
+ if (value < 0) {
+ return -1;
+ }
+
+ printk_raminit("\tindex: %08x\n", index);
+ printk_raminit("\t\tlatency: %08x\n", latency);
+ printk_raminit("\t\tvalue1: %08x\n", value);
+
+ value = convert_to_linear(value);
+
+ printk_raminit("\t\tvalue2: %08x\n", value);
+
+ /* Only increase the latency if we decrease the clock */
+ if (value >= *min_cycle_time ) {
+ if (value < new_cycle_time) {
+ new_cycle_time = value;
+ new_latency = latency;
+ } else if (value == new_cycle_time) {
+ if (new_latency > latency) {
+ new_latency = latency;
+ }
+ }
+ }
+ printk_raminit("\t\tnew_cycle_time: %08x\n", new_cycle_time);
+ printk_raminit("\t\tnew_latency: %08x\n", new_latency);
+
+ }
+
+ if (new_latency > 6){
+ return 1;
+ }
+
+ /* Does min_latency need to be increased? */
+ if (new_cycle_time > *min_cycle_time) {
+ *min_cycle_time = new_cycle_time;
+ }
+
+ /* Does min_cycle_time need to be increased? */
+ if (new_latency > *min_latency) {
+ *min_latency = new_latency;
+ }
+
+ printk_raminit("2 min_cycle_time: %08x\n", *min_cycle_time);
+ printk_raminit("2 min_latency: %08x\n", *min_latency);
+
+ return 0;
+}
+
static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, struct mem_info *meminfo)
{
/* Compute the minimum cycle time for these dimms */
int i;
uint32_t value;
- static const uint8_t latency_indicies[] = { 25, 23, 9 };
-
static const uint16_t min_cycle_times[] = { // use full speed to compare
[NBCAP_MEMCLK_NOLIMIT] = 0x250, /*2.5ns */
[NBCAP_MEMCLK_333MHZ] = 0x300, /* 3.0ns */
value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
bios_cycle_time = min_cycle_times[
- read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
+#ifdef CMOS_VSTART_max_mem_clock
+ read_option(max_mem_clock, 0)
+#else
+#if defined(CONFIG_MAX_MEM_CLOCK)
+ CONFIG_MAX_MEM_CLOCK
+#else
+ 0 // use DDR400 as default
+#endif
+#endif
+ ];
+
if (bios_cycle_time > min_cycle_time) {
min_cycle_time = bios_cycle_time;
}
min_latency = 3;
- print_tx("1 min_cycle_time:", min_cycle_time);
+ printk_raminit("1 min_cycle_time: %08x\n", min_cycle_time);
/* Compute the least latency with the fastest clock supported
* by both the memory controller and the dimms.
*/
for (i = 0; i < DIMM_SOCKETS; i++) {
- int new_cycle_time, new_latency;
- int index;
- int latencies;
- int latency;
- u32 spd_device = ctrl->channel0[i];
+ u32 spd_device;
- print_tx("1.1 dimm_mask:", meminfo->dimm_mask);
- if (!(meminfo->dimm_mask & (1 << i))) {
- if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
- spd_device = ctrl->channel1[i];
- } else {
- continue;
- }
- }
-
- /* First find the supported CAS latencies
- * Byte 18 for DDR SDRAM is interpreted:
- * bit 3 == CAS Latency = 3
- * bit 4 == CAS Latency = 4
- * bit 5 == CAS Latency = 5
- * bit 6 == CAS Latency = 6
- */
- new_cycle_time = 0x500;
- new_latency = 6;
-
- latencies = spd_read_byte(spd_device, SPD_CAS_LAT);
- if (latencies <= 0) continue;
+ printk_raminit("1.1 dimm_mask: %08x\n", meminfo->dimm_mask);
+ printk_raminit("i: %08x\n",i);
- print_tx("i:",i);
- print_tx("\tlatencies:", latencies);
- /* Compute the lowest cas latency supported */
- latency = log2(latencies) - 2;
+ if (meminfo->dimm_mask & (1 << i)) {
+ spd_device = ctrl->channel0[i];
+ printk_raminit("Channel 0 settings:\n");
- /* Loop through and find a fast clock with a low latency */
- for (index = 0; index < 3; index++, latency++) {
- int value;
- if ((latency < 3) || (latency > 6) ||
- (!(latencies & (1 << latency)))) {
- continue;
- }
- value = spd_read_byte(spd_device, latency_indicies[index]);
- if (value < 0) {
+ switch (find_optimum_spd_latency(spd_device, &min_latency, &min_cycle_time)) {
+ case -1:
goto hw_error;
+ break;
+ case 1:
+ continue;
}
-
- print_tx("\tindex:", index);
- print_tx("\t\tlatency:", latency);
- print_tx("\t\tvalue1:", value);
-
- value = convert_to_linear(value);
-
- print_tx("\t\tvalue2:", value);
-
- /* Only increase the latency if we decreas the clock */
- if (value >= min_cycle_time ) {
- if (value < new_cycle_time) {
- new_cycle_time = value;
- new_latency = latency;
- } else if (value == new_cycle_time) {
- if (new_latency > latency) {
- new_latency = latency;
- }
- }
- }
- print_tx("\t\tnew_cycle_time:", new_cycle_time);
- print_tx("\t\tnew_latency:", new_latency);
-
- }
-
- if (new_latency > 6){
- continue;
}
+ if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) {
+ spd_device = ctrl->channel1[i];
+ printk_raminit("Channel 1 settings:\n");
- /* Does min_latency need to be increased? */
- if (new_cycle_time > min_cycle_time) {
- min_cycle_time = new_cycle_time;
- }
-
- /* Does min_cycle_time need to be increased? */
- if (new_latency > min_latency) {
- min_latency = new_latency;
+ switch (find_optimum_spd_latency(spd_device, &min_latency, &min_cycle_time)) {
+ case -1:
+ goto hw_error;
+ break;
+ case 1:
+ continue;
+ }
}
- print_tx("2 min_cycle_time:", min_cycle_time);
- print_tx("2 min_latency:", min_latency);
}
/* Make a second pass through the dimms and disable
* any that cannot support the selected memclk and cas latency.
*/
- print_tx("3 min_cycle_time:", min_cycle_time);
- print_tx("3 min_latency:", min_latency);
+ printk_raminit("3 min_cycle_time: %08x\n", min_cycle_time);
+ printk_raminit("3 min_latency: %08x\n", min_latency);
for (i = 0; (i < DIMM_SOCKETS); i++) {
int latencies;
int latency;
int index;
- int value;
+ int val;
u32 spd_device = ctrl->channel0[i];
if (!(meminfo->dimm_mask & (1 << i))) {
}
/* Read the min_cycle_time for this latency */
- value = spd_read_byte(spd_device, latency_indicies[index]);
- if (value < 0) goto hw_error;
+ val = spd_read_byte(spd_device, latency_indicies[index]);
+ if (val < 0) goto hw_error;
- value = convert_to_linear(value);
+ val = convert_to_linear(val);
/* All is good if the selected clock speed
* is what I need or slower.
*/
- if (value <= min_cycle_time) {
+ if (val <= min_cycle_time) {
continue;
}
/* Otherwise I have an error, disable the dimm */
meminfo->dimm_mask = disable_dimm(ctrl, i, meminfo);
}
- print_tx("4 min_cycle_time:", min_cycle_time);
+ printk_raminit("4 min_cycle_time: %08x\n", min_cycle_time);
/* Now that I know the minimum cycle time lookup the memory parameters */
result.param = get_mem_param(min_cycle_time);
value |= result.param->dch_memclk << DCH_MemClkFreq_SHIFT;
pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
- print_debug(result.param->name);
+ printk(BIOS_DEBUG, "%s\n", result.param->name);
/* Update DRAM Timing Low with our selected cas latency */
value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
valuex = fraction [value & 0x7];
return valuex;
}
-static int update_dimm_Trc(const struct mem_controller *ctrl,
- const struct mem_param *param,
- int i, long dimm_mask)
+
+static int get_dimm_Trc_clocks(u32 spd_device, const struct mem_param *param)
{
- unsigned clocks, old_clocks;
- uint32_t dtl;
int value;
int value2;
- u32 spd_device = ctrl->channel0[i];
-
- if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
- spd_device = ctrl->channel1[i];
- }
-
+ int clocks;
value = spd_read_byte(spd_device, SPD_TRC);
- if (value < 0) return -1;
+ if (value < 0)
+ return -1;
+ printk_raminit("update_dimm_Trc: tRC (41) = %08x\n", value);
value2 = spd_read_byte(spd_device, SPD_TRC -1);
value <<= 2;
value += convert_to_1_4(value2>>4);
- value *=10;
+ value *= 10;
+ printk_raminit("update_dimm_Trc: tRC final value = %i\n", value);
clocks = (value + param->divisor - 1)/param->divisor;
+ printk_raminit("update_dimm_Trc: clocks = %i\n", clocks);
if (clocks < DTL_TRC_MIN) {
+ // We might want to die here instead, or at least disable this bank.
+ printk(BIOS_NOTICE, "update_dimm_Trc: Can't refresh fast enough, "
+ "want %i clocks, minimum is %i clocks.\n", clocks, DTL_TRC_MIN);
clocks = DTL_TRC_MIN;
}
+ return clocks;
+}
+
+static int update_dimm_Trc(const struct mem_controller *ctrl,
+ const struct mem_param *param,
+ int i, long dimm_mask)
+{
+ int clocks, old_clocks;
+ uint32_t dtl;
+ u32 spd_device = ctrl->channel0[i];
+
+ if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
+ spd_device = ctrl->channel1[i];
+ }
+
+ clocks = get_dimm_Trc_clocks(spd_device, param);
+ if (clocks == -1)
+ return clocks;
if (clocks > DTL_TRC_MAX) {
return 0;
}
+ printk_raminit("update_dimm_Trc: clocks after adjustment = %i\n", clocks);
dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
if (clocks < TT_MIN) {
clocks = TT_MIN;
}
-
+
if (clocks > TT_MAX) {
- return 0;
+ printk(BIOS_INFO, "warning spd byte : %x = %x > TT_MAX: %x, setting TT_MAX", SPD_TT, value, TT_MAX);
+ clocks = TT_MAX;
}
dtl = pci_read_config32(ctrl->f2, TT_REG);
return 1;
}
-
static int update_dimm_Trcd(const struct mem_controller *ctrl,
const struct mem_param *param, int i, long dimm_mask)
{
value = spd_read_byte(spd_device, SPD_TRAS); //in 1 ns
if (value < 0) return -1;
- print_tx("update_dimm_Tras: 0 value=", value);
+ printk_raminit("update_dimm_Tras: 0 value= %08x\n", value);
value <<= 2; //convert it to in 1/4ns
value *= 10;
- print_tx("update_dimm_Tras: 1 value=", value);
+ printk_raminit("update_dimm_Tras: 1 value= %08x\n", value);
clocks = (value + param->divisor - 1)/param->divisor;
- print_tx("update_dimm_Tras: divisor=", param->divisor);
- print_tx("update_dimm_Tras: clocks=", clocks);
+ printk_raminit("update_dimm_Tras: divisor= %08x\n", param->divisor);
+ printk_raminit("update_dimm_Tras: clocks= %08x\n", clocks);
if (clocks < DTL_TRAS_MIN) {
clocks = DTL_TRAS_MIN;
}
-
if (clocks > DTL_TRAS_MAX) {
return 0;
}
-
dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
if (old_clocks >= clocks) { // someone did it?
return 1;
}
-
dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
return 1;
}
-
static void set_4RankRDimm(const struct mem_controller *ctrl,
const struct mem_param *param, struct mem_info *meminfo)
{
-#if QRANK_DIMM_SUPPRT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
int value;
int i;
+ long dimm_mask = meminfo->dimm_mask;
if (!(meminfo->is_registered)) return;
continue;
}
- if (meminfo->sz.rank == 4) {
+ if (meminfo->sz[i].rank == 4) {
value = 1;
break;
}
#endif
}
-
static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl,
struct mem_info *meminfo)
{
uint32_t mask_single_rank;
uint32_t mask_page_1k;
int value;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
int rank;
#endif
value = spd_read_byte(spd_device, SPD_PRI_WIDTH);
- #if QRANK_DIMM_SUPPORT == 1
+ #if CONFIG_QRANK_DIMM_SUPPORT
rank = meminfo->sz[i].rank;
#endif
if (value==4) {
mask_x4 |= (1<<i);
- #if QRANK_DIMM_SUPPORT == 1
+ #if CONFIG_QRANK_DIMM_SUPPORT
if (rank==4) {
mask_x4 |= 1<<(i+2);
}
#endif
} else if (value==16) {
mask_x16 |= (1<<i);
- #if QRANK_DIMM_SUPPORT == 1
+ #if CONFIG_QRANK_DIMM_SUPPORT
if (rank==4) {
mask_x16 |= 1<<(i+2);
}
}
-#if DIMM_SUPPORT == 0x0204
+#if CONFIG_DIMM_SUPPORT == 0x0204
odt = 0x2; /* 150 ohms */
#endif
pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
}
-
static void set_ecc(const struct mem_controller *ctrl,
const struct mem_param *param, struct mem_info *meminfo)
{
if (nbcap & NBCAP_ECC) {
dcl |= DCL_DimmEccEn;
}
- if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
+#ifdef CMOS_VSTART_ECC_memory
+ if (read_option(ECC_memory, 1) == 0) {
dcl &= ~DCL_DimmEccEn;
}
+#else // CMOS_VSTART_ECC_memory not defined
+#if !CONFIG_ECC_MEMORY
+ dcl &= ~DCL_DimmEccEn;
+#endif
+#endif
pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
meminfo->is_ecc = 1;
if (!(dcl & DCL_DimmEccEn)) {
meminfo->is_ecc = 0;
+ printk(BIOS_DEBUG, "set_ecc: ECC disabled\n");
return; // already disabled the ECC, so don't need to read SPD any more
}
if (!(meminfo->dimm_mask & (1 << i))) {
if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
spd_device = ctrl->channel1[i];
- printk_debug("set_ecc spd_device: 0x%x\n", spd_device);
+ printk(BIOS_DEBUG, "set_ecc spd_device: 0x%x\n", spd_device);
} else {
continue;
}
uint32_t reg;
if ((val < TT_MIN) || (val > TT_MAX)) {
- print_err(str);
- die(" Unknown\r\n");
+ printk(BIOS_ERR, "%s", str);
+ die(" Unknown\n");
}
reg = pci_read_config32(ctrl->f2, TT_REG);
set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRWR_SHIFT, DTH_TWRWR_MASK,DTH_TWRWR_BASE, DTH_TWRWR_MIN, DTH_TWRWR_MAX, param->Twrwr, "Twrwr");
}
-
static void set_Trdrd(const struct mem_controller *ctrl, const struct mem_param *param)
{
set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRDRD_SHIFT, DTH_TRDRD_MASK,DTH_TRDRD_BASE, DTH_TRDRD_MIN, DTH_TRDRD_MAX, param->Trdrd, "Trdrd");
}
-
static void set_DcqBypassMax(const struct mem_controller *ctrl, const struct mem_param *param)
{
set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_DcqBypassMax_SHIFT, DCH_DcqBypassMax_MASK,DCH_DcqBypassMax_BASE, DCH_DcqBypassMax_MIN, DCH_DcqBypassMax_MAX, param->DcqByPassMax, "DcqBypassMax"); // value need to be in CMOS
}
-
static void set_Tfaw(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
{
static const uint8_t faw_1k[] = {8, 10, 13, 14};
}
set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_FourActWindow_SHIFT, DCH_FourActWindow_MASK, DCH_FourActWindow_BASE, DCH_FourActWindow_MIN, DCH_FourActWindow_MAX, faw, "FourActWindow");
-
}
-
static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
{
uint32_t dch;
pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
}
-
+#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
static void set_SlowAccessMode(const struct mem_controller *ctrl)
{
uint32_t dch;
pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
}
-
+#endif
/*
DRAM_OUTPUT_DRV_COMP_CTRL 0, 0x20
{
uint32_t dword;
uint32_t dwordx;
+#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
unsigned SlowAccessMode = 0;
+#endif
+#if CONFIG_DIMM_SUPPORT==0x0104 /* DDR2 and REG */
long dimm_mask = meminfo->dimm_mask & 0x0f;
-
-#if DIMM_SUPPORT==0x0104 /* DDR2 and REG */
/* for REG DIMM */
dword = 0x00111222;
dwordx = 0x002f0000;
#endif
-#if DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */
+#if CONFIG_DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */
dword = 0x00111222;
dwordx = 0x002F2F00;
}
#endif
-#if DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */
+#if CONFIG_DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */
+ long dimm_mask = meminfo->dimm_mask & 0x0f;
/* for UNBUF DIMM */
dword = 0x00111222;
dwordx = 0x002f2f00;
break;
}
- print_raminit("\tdimm_mask = ", meminfo->dimm_mask);
- print_raminit("\tx4_mask = ", meminfo->x4_mask);
- print_raminit("\tx16_mask = ", meminfo->x16_mask);
- print_raminit("\tsingle_rank_mask = ", meminfo->single_rank_mask);
- print_raminit("\tODC = ", dword);
- print_raminit("\tAddr Timing= ", dwordx);
+ printk_raminit("\tdimm_mask = %08x\n", meminfo->dimm_mask);
+ printk_raminit("\tx4_mask = %08x\n", meminfo->x4_mask);
+ printk_raminit("\tx16_mask = %08x\n", meminfo->x16_mask);
+ printk_raminit("\tsingle_rank_mask = %08x\n", meminfo->single_rank_mask);
+ printk_raminit("\tODC = %08x\n", dword);
+ printk_raminit("\tAddr Timing= %08x\n", dwordx);
#endif
-#if (DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
+#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
if (SlowAccessMode) {
set_SlowAccessMode(ctrl);
}
static void set_RDqsEn(const struct mem_controller *ctrl,
const struct mem_param *param, struct mem_info *meminfo)
{
-#if CPU_SOCKET_TYPE==0x10
+#if CONFIG_CPU_SOCKET_TYPE==0x10
//only need to set for reg and x8
uint32_t dch;
#endif
}
-
static void set_idle_cycle_limit(const struct mem_controller *ctrl,
const struct mem_param *param)
{
pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
}
-
static void set_RdWrQByp(const struct mem_controller *ctrl,
const struct mem_param *param)
{
set_TT(ctrl, param, DRAM_CTRL_MISC, DCM_RdWrQByp_SHIFT, DCM_RdWrQByp_MASK,0, 0, 3, 2, "RdWrQByp");
}
-
static long spd_set_dram_timing(const struct mem_controller *ctrl,
const struct mem_param *param,
struct mem_info *meminfo)
!(meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) ) {
continue;
}
- print_tx("spd_set_dram_timing dimm socket: ", i);
+ printk_raminit("spd_set_dram_timing dimm socket: %08x\n", i);
/* DRAM Timing Low Register */
- print_t("\ttrc\r\n");
+ printk_raminit("\ttrc\n");
if ((rc = update_dimm_Trc (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
- print_t("\ttrcd\r\n");
+ printk_raminit("\ttrcd\n");
if ((rc = update_dimm_Trcd(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
- print_t("\ttrrd\r\n");
+ printk_raminit("\ttrrd\n");
if ((rc = update_dimm_Trrd(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
- print_t("\ttras\r\n");
+ printk_raminit("\ttras\n");
if ((rc = update_dimm_Tras(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
- print_t("\ttrp\r\n");
+ printk_raminit("\ttrp\n");
if ((rc = update_dimm_Trp (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
- print_t("\ttrtp\r\n");
+ printk_raminit("\ttrtp\n");
if ((rc = update_dimm_Trtp(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
- print_t("\ttwr\r\n");
+ printk_raminit("\ttwr\n");
if ((rc = update_dimm_Twr (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
/* DRAM Timing High Register */
- print_t("\ttref\r\n");
+ printk_raminit("\ttref\n");
if ((rc = update_dimm_Tref(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
- print_t("\ttwtr\r\n");
+ printk_raminit("\ttwtr\n");
if ((rc = update_dimm_Twtr(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
- print_t("\ttrfc\r\n");
+ printk_raminit("\ttrfc\n");
if ((rc = update_dimm_Trfc(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
/* DRAM Config Low */
continue;
dimm_err:
- printk_debug("spd_set_dram_timing dimm_err!\n");
+ printk(BIOS_DEBUG, "spd_set_dram_timing dimm_err!\n");
if (rc < 0) {
return -1;
}
#endif
meminfo = &sysinfo->meminfo[ctrl->node_id];
- print_debug_addr("sdram_set_spd_registers: paramx :", &paramx);
+ printk(BIOS_DEBUG, "sdram_set_spd_registers: paramx :%p\n", &paramx);
activate_spd_rom(ctrl);
meminfo->dimm_mask = spd_detect_dimms(ctrl);
- print_tx("sdram_set_spd_registers: dimm_mask=0x%x\n", meminfo->dimm_mask);
+ printk_raminit("sdram_set_spd_registers: dimm_mask=0x%x\n", meminfo->dimm_mask);
if (!(meminfo->dimm_mask & ((1 << 2*DIMM_SOCKETS) - 1)))
{
- print_debug("No memory for this cpu\r\n");
+ printk(BIOS_DEBUG, "No memory for this cpu\n");
return;
}
meminfo->dimm_mask = spd_enable_2channels(ctrl, meminfo);
- print_tx("spd_enable_2channels: dimm_mask=0x%x\n", meminfo->dimm_mask);
+ printk_raminit("spd_enable_2channels: dimm_mask=0x%x\n", meminfo->dimm_mask);
if (meminfo->dimm_mask == -1)
goto hw_spd_err;
meminfo->dimm_mask = spd_set_ram_size(ctrl, meminfo);
- print_tx("spd_set_ram_size: dimm_mask=0x%x\n", meminfo->dimm_mask);
+ printk_raminit("spd_set_ram_size: dimm_mask=0x%x\n", meminfo->dimm_mask);
if (meminfo->dimm_mask == -1)
goto hw_spd_err;
meminfo->dimm_mask = spd_handle_unbuffered_dimms(ctrl, meminfo);
- print_tx("spd_handle_unbuffered_dimms: dimm_mask=0x%x\n", meminfo->dimm_mask);
+ printk_raminit("spd_handle_unbuffered_dimms: dimm_mask=0x%x\n", meminfo->dimm_mask);
if (meminfo->dimm_mask == -1)
goto hw_spd_err;
result = spd_set_memclk(ctrl, meminfo);
param = result.param;
meminfo->dimm_mask = result.dimm_mask;
- print_tx("spd_set_memclk: dimm_mask=0x%x\n", meminfo->dimm_mask);
+ printk_raminit("spd_set_memclk: dimm_mask=0x%x\n", meminfo->dimm_mask);
if (meminfo->dimm_mask == -1)
goto hw_spd_err;
paramx.divisor = get_exact_divisor(param->dch_memclk, paramx.divisor);
meminfo->dimm_mask = spd_set_dram_timing(ctrl, &paramx, meminfo);
- print_tx("spd_set_dram_timing: dimm_mask=0x%x\n", meminfo->dimm_mask);
+ printk_raminit("spd_set_dram_timing: dimm_mask=0x%x\n", meminfo->dimm_mask);
if (meminfo->dimm_mask == -1)
goto hw_spd_err;
#include "raminit_f_dqs.c"
-#if HW_MEM_HOLE_SIZEK != 0
+#if CONFIG_HW_MEM_HOLE_SIZEK != 0
static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
{
int ii;
uint32_t hole_startk;
int i;
- hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
+ hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK;
-#if HW_MEM_HOLE_SIZE_AUTO_INC == 1
+ printk_raminit("Handling memory hole at 0x%08x (default)\n", hole_startk);
+#if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
/* We need to double check if the hole_startk is valid, if it is equal
to basek, we need to decrease it some */
uint32_t basek_pri;
}
basek_pri = base_k;
}
+ printk_raminit("Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
#endif
/* find node index that need do set hole */
for (i=0; i < controllers; i++) {
}
#endif
-
+#if CONFIG_HAVE_ACPI_RESUME == 1
+#include "exit_from_self.c"
+#endif
static void sdram_enable(int controllers, const struct mem_controller *ctrl,
struct sys_info *sysinfo)
{
int i;
+#if CONFIG_HAVE_ACPI_RESUME == 1
+ int suspend = acpi_is_wakeup_early();
+#else
+ int suspend = 0;
+#endif
#if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
unsigned cpu_f0_f1[8];
/* FIXME: How about 32 node machine later? */
tsc_t tsc, tsc0[8];
- print_debug_addr("sdram_enable: tsc0[8]: ", &tsc0[0]);
-#endif
+ printk(BIOS_DEBUG, "sdram_enable: tsc0[8]: %p\n", &tsc0[0]);
uint32_t dword;
+#endif
/* Error if I don't have memory */
if (memory_end_k(ctrl, controllers) == 0) {
- die("No memory\r\n");
+ die("No memory\n");
}
/* Before enabling memory start the memory clocks */
for (i = 0; i < controllers; i++) {
- uint32_t dtl, dch;
+ uint32_t dch;
if (!sysinfo->ctrl_present[ i ])
continue;
dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
}
}
- /* We need to wait a mimmium of 20 MEMCLKS to enable the InitDram */
+ /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
memreset(controllers, ctrl);
-#if 0
- print_debug("prepare to InitDram:");
- for (i=0; i<10; i++) {
- print_debug_hex32(i);
- print_debug("\b\b\b\b\b\b\b\b");
+
+ /* let's override the rest of the routine */
+ if (suspend) {
+ printk(BIOS_DEBUG, "Wakeup!\n");
+ exit_from_self(controllers, ctrl, sysinfo);
+ printk(BIOS_DEBUG, "Mem running !\n");
+ return;
}
- print_debug("\r\n");
-#endif
for (i = 0; i < controllers; i++) {
uint32_t dcl, dch;
dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
if (dcl & DCL_DimmEccEn) {
uint32_t mnc;
- print_spew("ECC enabled\r\n");
+ printk(BIOS_SPEW, "ECC enabled\n");
mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
mnc |= MNC_ECC_EN;
if (dcl & DCL_Width128) {
}
#endif
-#if 0
- /* Set the DqsRcvEnTrain bit */
- dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
- dword |= DC_DqsRcvEnTrain;
- pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
-#endif
-
pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
dcl |= DCL_InitDram;
pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
}
for (i = 0; i < controllers; i++) {
- uint32_t dcl, dch, dcm;
+ uint32_t dcl, dcm;
if (!sysinfo->ctrl_present[ i ])
continue;
/* Skip everything if I don't have any memory on this controller */
if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
- print_debug("Initializing memory: ");
+ printk(BIOS_DEBUG, "Initializing memory: ");
int loops = 0;
do {
dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
loops++;
if ((loops & 1023) == 0) {
- print_debug(".");
+ printk(BIOS_DEBUG, ".");
}
} while(((dcl & DCL_InitDram) != 0) && (loops < TIMEOUT_LOOPS));
if (loops >= TIMEOUT_LOOPS) {
- print_debug(" failed\r\n");
+ printk(BIOS_DEBUG, " failed\n");
continue;
}
if (cpu_f0_f1[i]) {
tsc= rdtsc();
- print_debug_dqs_tsc("\r\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
+ print_debug_dqs_tsc("\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
print_debug_dqs_tsc("end tsc ", i, tsc.hi, tsc.lo, 2);
if (tsc.lo<tsc0[i].lo) {
print_debug_dqs_tsc(" dtsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
}
#endif
- print_debug(" done\r\n");
+ printk(BIOS_DEBUG, " done\n");
}
-#if HW_MEM_HOLE_SIZEK != 0
+#if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* init hw mem hole here */
/* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
set_hw_mem_hole(controllers, ctrl);
}
-#if MEM_TRAIN_SEQ == 0
+#if CONFIG_MEM_TRAIN_SEQ == 0
#if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
dqs_timing(controllers, ctrl, tsc0, sysinfo);
#else
#endif
#else
-#if MEM_TRAIN_SEQ == 2
+#if CONFIG_MEM_TRAIN_SEQ == 2
/* need to enable mtrr, so dqs training could access the test address */
setup_mtrr_dqs(sysinfo->tom_k, sysinfo->tom2_k);
#endif
dqs_timing(i, &ctrl[i], sysinfo, 1);
-#if MEM_TRAIN_SEQ == 1
+#if CONFIG_MEM_TRAIN_SEQ == 1
break; // only train the first node with ram
#endif
}
-#if MEM_TRAIN_SEQ == 2
+#if CONFIG_MEM_TRAIN_SEQ == 2
clear_mtrr_dqs(sysinfo->tom2_k);
#endif
#endif
-#if MEM_TRAIN_SEQ != 1
+#if CONFIG_MEM_TRAIN_SEQ != 1
wait_all_core0_mem_trained(sysinfo);
#endif
}
-
-static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
- const uint16_t *spd_addr)
+void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
+ const uint16_t *spd_addr)
{
int i;
int j;