# error "CONFIG_RAMTOP must be a power of 2"
#endif
-#ifndef QRANK_DIMM_SUPPORT
-#define QRANK_DIMM_SUPPORT 0
-#endif
-
void setup_resource_map(const unsigned int *register_values, int max)
{
int i;
if (nbcap & NBCAP_ECC) {
dcl |= DCL_DimmEccEn;
}
- if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
+ if (read_option(ECC_memory, 1) == 0) {
dcl &= ~DCL_DimmEccEn;
}
pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
unsigned long side2;
unsigned long rows;
unsigned long col;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
unsigned long rank;
#endif
};
sz.side2 = 0;
sz.rows = 0;
sz.col = 0;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
sz.rank = 0;
#endif
if ((value != 2) && (value != 4 )) {
goto val_err;
}
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
sz.rank = value;
#endif
sz.side2 = 0;
sz.rows = 0;
sz.col = 0;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
sz.rank = 0;
#endif
- out:
+out:
return sz;
}
/* Set the appropriate DIMM base address register */
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz.rank == 4) {
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
if (base0) {
dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
dch |= DCH_MEMCLK_EN0 << index;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz.rank == 4) {
dch |= DCH_MEMCLK_EN0 << (index + 2);
}
map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
map &= ~(0xf << (index * 4));
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz.rank == 4) {
map &= ~(0xf << ( (index + 2) * 4));
}
if (sz.side1 >= (25 +3)) {
if (is_cpu_pre_d0()) {
map |= (sz.side1 - (25 + 3)) << (index *4);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz.rank == 4) {
map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
}
}
else {
map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (sz.rank == 4) {
map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
}
{
unsigned long tom_k, base_k;
- if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
+ if (read_option(interleave_chip_selects, 1) != 0) {
tom_k = interleave_chip_selects(ctrl);
} else {
printk(BIOS_DEBUG, "Interleaving disabled\n");
dcl &= ~DCL_UnBuffDimm;
if (unbuffered) {
if ((has_dualch) && (!is_cpu_pre_d0())) {
- dcl |= DCL_UnBuffDimm; /* set DCL_DualDIMMen too? */
-
- /* set DCL_En2T if you have non-equal DDR mem types! */
-
+ dcl |= DCL_UnBuffDimm;
+#if CONFIG_CPU_AMD_SOCKET_939
if ((cpuid_eax(1) & 0x30) == 0x30) {
/* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
dcl |= DCL_UpperCSMap;
}
+#endif
} else {
dcl |= DCL_UnBuffDimm;
}
17, /* *Logical Banks */
18, /* *Supported CAS Latencies */
21, /* *SDRAM Module Attributes */
- 23, /* *Cycle time at CAS Latnecy (CLX - 0.5) */
- 26, /* *Cycle time at CAS Latnecy (CLX - 1.0) */
+ 23, /* *Cycle time at CAS Latency (CLX - 0.5) */
+ 25, /* *Cycle time at CAS Latency (CLX - 1.0) */
27, /* *tRP Row precharge time */
28, /* *Minimum Row Active to Row Active Delay (tRRD) */
29, /* *tRCD RAS to CAS */
char name[9];
};
-static const struct mem_param *get_mem_param(unsigned min_cycle_time)
+/* Return the timing parameter set for the given NBCAP_MEMCLK_* frequency
+ * index; the speed[] table is indexed directly by that constant, so the
+ * caller must pass a valid in-range index. */
+static const struct mem_param *get_mem_param(int freq)
{
	static const struct mem_param speed[] = {
-	{
-		.name	    = "100Mhz",
+		[NBCAP_MEMCLK_100MHZ] = {
+			.name	    = "100MHz",
		.cycle_time = 0xa0,
		.divisor    = (10 <<1),
		.tRC	    = 0x46,
		.dtl_trwt   = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
	},
-	{
-		.name	    = "133Mhz",
+		[NBCAP_MEMCLK_133MHZ] = {
+			.name	    = "133MHz",
		.cycle_time = 0x75,
		.divisor    = (7<<1)+1,
		.tRC	    = 0x41,
		.dtl_trwt   = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
	},
-	{
-		.name	    = "166Mhz",
+		[NBCAP_MEMCLK_166MHZ] = {
+			.name	    = "166MHz",
		.cycle_time = 0x60,
		.divisor    = (6<<1),
		.tRC	    = 0x3C,
		.dtl_trwt   = { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
		.rdpreamble = { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
	},
-	{
-		.name	    = "200Mhz",
+		[NBCAP_MEMCLK_200MHZ] = {
+			.name	    = "200MHz",
		.cycle_time = 0x50,
		.divisor    = (5<<1),
		.tRC	    = 0x37,
		.dtl_twtr   = 2,
		.dtl_trwt   = { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
-	},
-	{
-		.cycle_time = 0x00,
-	},
+		}
	};
	const struct mem_param *param;
-	for (param = &speed[0]; param->cycle_time ; param++) {
-		if (min_cycle_time > (param+1)->cycle_time) {
-			break;
-		}
-	}
-	if (!param->cycle_time) {
-		die("min_cycle_time to low");
-	}
+
+	/* direct lookup replaces the old cycle-time scan */
+	param = speed + freq;
	printk(BIOS_SPEW, "%s\n", param->name);
	return param;
}
const struct mem_param *param;
long dimm_mask;
};
+
+/*
+ * Determine the fastest memory clock permitted for the installed DIMM
+ * loading (which sockets are populated and which DIMMs are dual rank /
+ * double sided), using the socket-specific tables from the BKDG.
+ * Returns an NBCAP_MEMCLK_* constant.  On socket 754, *freq_1t is also
+ * set to the fastest frequency at which 1T command timing is allowed;
+ * the socket 939 path instead programs 2T timing directly into
+ * DRAM_CONFIG_LOW and does not write *freq_1t.
+ */
+static int spd_dimm_loading_socket(const struct mem_controller *ctrl, long dimm_mask, int *freq_1t)
+{
+
+#if CONFIG_CPU_AMD_SOCKET_939
+
+/* +1 offset so a zero (unset) table entry is detectable as an
+ * invalid/unsupported DIMM combination */
+#define DDR200 (NBCAP_MEMCLK_100MHZ + 1)
+#define DDR333 (NBCAP_MEMCLK_166MHZ + 1)
+#define DDR400 (NBCAP_MEMCLK_200MHZ + 1)
+#define DDR_2T 0x80
+#define DDR_MASK 0x7
+
+#define DDR200_2T (DDR_2T | DDR200)
+#define DDR333_2T (DDR_2T | DDR333)
+#define DDR400_2T (DDR_2T | DDR400)
+
+/*
+ Following table comes directly from BKDG (unbuffered DIMM support)
+ [Y][X] Y = ch0_0, ch1_0, ch0_1, ch1_1 1=present 0=empty
+ X uses same layout but 1 means double rank 0 is single rank/empty
+
+ Following tables come from the BKDG; the ch{0_0,1_0,0_1,1_1} maps to
+ MEMCS_{1L,1H,2L,2H} in the PDF. PreE is table 45, and revE table 46.
+*/
+
+	static const unsigned char dimm_loading_config_preE[16][16] = {
+		[0x8] = {[0x0] = DDR400,[0x8] = DDR400},
+		[0x2] = {[0x0] = DDR333,[0x2] = DDR400},
+		[0xa] = {[0x0] = DDR400_2T,[0x2] = DDR400_2T,
+			 [0x8] = DDR400_2T,[0xa] = DDR333_2T},
+		[0xc] = {[0x0] = DDR400,[0xc] = DDR400},
+		[0x3] = {[0x0] = DDR333,[0x3] = DDR400},
+		[0xf] = {[0x0] = DDR400_2T,[0x3] = DDR400_2T,
+			 [0xc] = DDR400_2T,[0xf] = DDR333_2T},
+	};
+
+	static const unsigned char dimm_loading_config_revE[16][16] = {
+		[0x8] = {[0x0] = DDR400, [0x8] = DDR400},
+		[0x2] = {[0x0] = DDR333, [0x2] = DDR400},
+		[0x4] = {[0x0] = DDR400, [0x4] = DDR400},
+		[0x1] = {[0x0] = DDR333, [0x1] = DDR400},
+		[0xa] = {[0x0] = DDR400_2T, [0x2] = DDR400_2T,
+			 [0x8] = DDR400_2T, [0xa] = DDR333_2T},
+		[0x5] = {[0x0] = DDR400_2T, [0x1] = DDR400_2T,
+			 [0x4] = DDR400_2T, [0x5] = DDR333_2T},
+		[0xc] = {[0x0] = DDR400, [0xc] = DDR400, [0x4] = DDR400, [0x8] = DDR400},
+		[0x3] = {[0x0] = DDR333, [0x1] = DDR333, [0x2] = DDR333, [0x3] = DDR400},
+		[0xe] = {[0x0] = DDR400_2T, [0x4] = DDR400_2T, [0x2] = DDR400_2T,
+			 [0x6] = DDR400_2T, [0x8] = DDR400_2T, [0xc] = DDR400_2T,
+			 [0xa] = DDR333_2T, [0xe] = DDR333_2T},
+		[0xb] = {[0x0] = DDR333, [0x1] = DDR400_2T, [0x2] = DDR333_2T,
+			 [0x3] = DDR400_2T, [0x8] = DDR333_2T, [0x9] = DDR400_2T,
+			 [0xa] = DDR333_2T, [0xb] = DDR333_2T},
+		[0xd] = {[0x0] = DDR400_2T, [0x8] = DDR400_2T, [0x1] = DDR400_2T,
+			 [0x9] = DDR333_2T, [0x4] = DDR400_2T, [0xc] = DDR400_2T,
+			 [0x5] = DDR333_2T, [0xd] = DDR333_2T},
+		[0x7] = {[0x0] = DDR333, [0x2] = DDR400_2T, [0x1] = DDR333_2T,
+			 [0x3] = DDR400_2T, [0x4] = DDR333_2T, [0x6] = DDR400_2T,
+			 [0x5] = DDR333_2T, [0x7] = DDR333_2T},
+		[0xf] = {[0x0] = DDR400_2T, [0x1] = DDR400_2T, [0x4] = DDR400_2T,
+			 [0x5] = DDR333_2T, [0x2] = DDR400_2T, [0x3] = DDR400_2T,
+			 [0x6] = DDR400_2T, [0x7] = DDR333_2T, [0x8] = DDR400_2T,
+			 [0x9] = DDR400_2T, [0xc] = DDR400_2T, [0xd] = DDR333_2T,
+			 [0xa] = DDR333_2T, [0xb] = DDR333_2T, [0xe] = DDR333_2T,
+			 [0xf] = DDR333_2T},
+	};
+	/* The dpos bits match the channel positions defined in the BKDG and
+	   the tables above; rpos is a bitmask of dual-rank DIMMs in the same
+	   bit order as dpos */
+	unsigned int dloading = 0, i, rpos = 0, dpos = 0;
+	const unsigned char (*dimm_loading_config)[16] = dimm_loading_config_revE;
+	int rank;
+	uint32_t dcl;
+
+	if (is_cpu_pre_e0()) {
+		dimm_loading_config = dimm_loading_config_preE;
+	}
+
+	/* only two DIMMs per channel */
+	for (i = 0; i < 2; i++) {
+		if ((dimm_mask & (1 << i))) {
+			/* read rank count (SPD byte 5) on channel 0 */
+			rank = spd_read_byte(ctrl->channel0[i], 5);
+			if (rank < 0) goto hw_error;
+			rpos |= (rank == 2) ? (1 << (3 - (i * 2))) : 0;
+			dpos |= (1 << (3 - (i * 2)));
+		}
+
+		if ((dimm_mask & (1 << (i+DIMM_SOCKETS)))) {
+			/* read rank count (SPD byte 5) on channel 1 */
+			rank = spd_read_byte(ctrl->channel1[i], 5);
+			if (rank < 0) goto hw_error;
+			rpos |= (rank == 2) ? (1 << (2 - (i * 2))) : 0;
+			dpos |= (1 << (2 - (i * 2)));
+		}
+	}
+	/* now the lookup, decode the max speed DDR400_2T etc */
+	dloading = dimm_loading_config[dpos][rpos] & DDR_MASK;
+#if 0
+	printk(BIOS_DEBUG, "XXX %x %x dload %x 2T %x\n", dpos,rpos, dloading, dimm_loading_config[dpos][rpos] & DDR_2T);
+#endif
+hw_error:
+	/* On an SPD read failure we land here with dloading still 0, so the
+	 * else branch below applies the DDR400 fallback. */
+	if (dloading != 0) {
+		/* we have a valid combination; check the restrictions */
+		dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
+		dcl |= ((dimm_loading_config[dpos][rpos] & DDR_2T) || CONFIG_K8_FORCE_2T_DRAM_TIMING) ? (DCL_En2T) : 0;
+		/* Set DualDIMMen if second channel is completely empty (revD+) */
+		if (((cpuid_eax(1) & 0xfff0f) >= 0x10f00) && ((dpos & 0x5) == 0)) {
+			printk(BIOS_DEBUG, "Setting DualDIMMen\n");
+			dcl |= DCL_DualDIMMen;
+		}
+		pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
+		return dloading - 1;	/* undo the +1 offset of the DDRxxx constants */
+	} else {
+		/* if we don't find it we set it to DDR400 */
+		printk(BIOS_WARNING, "Detected strange DIMM configuration, may not work! (or bug)\n");
+		return NBCAP_MEMCLK_200MHZ;
+	}
+
+#elif CONFIG_CPU_AMD_SOCKET_754
+
+#define CFGIDX(DIMM1,DIMM2,DIMM3) ((DIMM3)*9+(DIMM2)*3+(DIMM1))
+
+#define EMPTY 0
+#define X8S_X16 1
+#define X8D 2
+
+#define DDR200 NBCAP_MEMCLK_100MHZ
+#define DDR333 NBCAP_MEMCLK_166MHZ
+#define DDR400 NBCAP_MEMCLK_200MHZ
+
+	/* this is table 42 from the BKDG, ignoring footnote 4,
+	 * with the EMPTY, EMPTY, EMPTY row added */
+	static const unsigned char cfgtable[][2] = {
+		[CFGIDX(EMPTY,   EMPTY,   EMPTY  )] = { DDR400, DDR400 },
+		[CFGIDX(X8S_X16, EMPTY,   EMPTY  )] = { DDR400, DDR400 },
+		[CFGIDX(EMPTY,   X8S_X16, EMPTY  )] = { DDR400, DDR400 },
+		[CFGIDX(EMPTY,   EMPTY,   X8S_X16)] = { DDR400, DDR400 },
+		[CFGIDX(X8D,     EMPTY,   EMPTY  )] = { DDR400, DDR400 },
+		[CFGIDX(EMPTY,   X8D,     EMPTY  )] = { DDR400, DDR400 },
+		[CFGIDX(EMPTY,   EMPTY,   X8D    )] = { DDR400, DDR400 },
+		[CFGIDX(X8S_X16, X8S_X16, EMPTY  )] = { DDR400, DDR400 },
+		[CFGIDX(X8S_X16, X8D,     EMPTY  )] = { DDR400, DDR400 },
+		[CFGIDX(X8S_X16, EMPTY,   X8S_X16)] = { DDR400, DDR400 },
+		[CFGIDX(X8S_X16, EMPTY,   X8D    )] = { DDR400, DDR400 },
+		[CFGIDX(X8D,     X8S_X16, EMPTY  )] = { DDR400, DDR400 },
+		[CFGIDX(X8D,     X8D,     EMPTY  )] = { DDR333, DDR333 },
+		[CFGIDX(X8D,     EMPTY,   X8S_X16)] = { DDR400, DDR400 },
+		[CFGIDX(X8D,     EMPTY,   X8D    )] = { DDR333, DDR333 },
+		[CFGIDX(EMPTY,   X8S_X16, X8S_X16)] = { DDR333, DDR400 },
+		[CFGIDX(EMPTY,   X8S_X16, X8D    )] = { DDR200, DDR400 },
+		[CFGIDX(EMPTY,   X8D,     X8S_X16)] = { DDR200, DDR400 },
+		[CFGIDX(EMPTY,   X8D,     X8D    )] = { DDR200, DDR333 },
+		[CFGIDX(X8S_X16, X8S_X16, X8S_X16)] = { DDR333, DDR400 },
+		[CFGIDX(X8S_X16, X8S_X16, X8D    )] = { DDR200, DDR333 },
+		[CFGIDX(X8S_X16, X8D,     X8S_X16)] = { DDR200, DDR333 },
+		[CFGIDX(X8S_X16, X8D,     X8D    )] = { DDR200, DDR333 },
+		[CFGIDX(X8D,     X8S_X16, X8S_X16)] = { DDR333, DDR333 },
+		[CFGIDX(X8D,     X8S_X16, X8D    )] = { DDR200, DDR333 },
+		[CFGIDX(X8D,     X8D,     X8S_X16)] = { DDR200, DDR333 },
+		[CFGIDX(X8D,     X8D,     X8D    )] = { DDR200, DDR333 }
+	};
+
+	int i, rank, width, dimmtypes[3];
+	const unsigned char *cfg;
+
+	for (i = 0; i < 3; i++) {
+		if (dimm_mask & (1 << i)) {
+			/* SPD byte 5 = rank count, byte 13 = primary SDRAM width */
+			rank = spd_read_byte(ctrl->channel0[i], 5);
+			width = spd_read_byte(ctrl->channel0[i], 13);
+			if (rank < 0 || width < 0) die("failed to read SPD");
+			width &= 0x7f;
+			/* this is my guess as to how the criteria in the table
+			 * are to be understood:
+			 */
+			dimmtypes[i] = width >= (rank == 1 ? 8 : 16) ? X8S_X16 : X8D;
+		} else {
+			dimmtypes[i] = EMPTY;
+		}
+	}
+	cfg = cfgtable[CFGIDX(dimmtypes[0], dimmtypes[1], dimmtypes[2])];
+	/* cfg[0] doubles as the 1T-timing limit; cfg[1] applies to non-C0 parts */
+	*freq_1t = cfg[0];
+	return is_cpu_c0() ? cfg[0] : cfg[1];
+
+#else /* CONFIG_CPU_AMD_SOCKET_* */
+
+/* well, there are socket 940 boards supported which obviously fail to
+ * compile with this */
+// #error load dependent memory clock limiting is not implemented for this socket
+
+	/* see BKDG 4.1.3--if you just want to test a setup that doesn't
+	 * require limiting, you may use the following code */
+
+	*freq_1t = NBCAP_MEMCLK_200MHZ;
+	return NBCAP_MEMCLK_200MHZ;
+
+#endif /* CONFIG_CPU_AMD_SOCKET_* */
+
+}
+
static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
{
- /* Compute the minimum cycle time for these dimms */
struct spd_set_memclk_result result;
- unsigned min_cycle_time, min_latency, bios_cycle_time;
- int i;
+ unsigned char cl_at_freq[NBCAP_MEMCLK_MASK + 1];
+ int dimm, freq, max_freq_bios, max_freq_dloading, max_freq_1t;
uint32_t value;
- static const uint8_t latency_indicies[] = { 26, 23, 9 };
- static const unsigned char min_cycle_times[] = {
+ static const uint8_t spd_min_cycle_time_indices[] = { 9, 23, 25 };
+ static const unsigned char cycle_time_at_freq[] = {
[NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
[NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
[NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
[NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
};
- value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
-
- min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
- bios_cycle_time = min_cycle_times[
- read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
- if (bios_cycle_time > min_cycle_time) {
- min_cycle_time = bios_cycle_time;
- }
- min_latency = 2;
-
- /* Compute the least latency with the fastest clock supported
- * by both the memory controller and the dimms.
+ /* BEWARE that the constants for frequencies order in reverse of what
+ * would be intuitive. 200 MHz has the lowest constant, 100 MHz the
+ * highest. Thus, all comparisons and traversal directions having to
+ * do with frequencies are/have to be the opposite of what would be
+ * intuitive.
*/
- for (i = 0; i < DIMM_SOCKETS; i++) {
- int new_cycle_time, new_latency;
- int index;
- int latencies;
- int latency;
- if (!(dimm_mask & (1 << i))) {
+ /* the CLs supported by the controller: */
+ memset(cl_at_freq, 0x1c, sizeof(cl_at_freq));
+ memset(cl_at_freq, 0x00,
+ (pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP) >>
+ NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK);
+ max_freq_bios = read_option(max_mem_clock, 0);
+ if (max_freq_bios <= NBCAP_MEMCLK_100MHZ)
+ memset(cl_at_freq, 0x00, max_freq_bios);
+ for (dimm = 0; dimm < DIMM_SOCKETS; dimm++) {
+ int x,i,spd_cls,cl,spd_min_cycle_time;
+ unsigned char cl_at_freq_mask[sizeof(cl_at_freq)];
+
+ if (!(dimm_mask & (1 << dimm)))
continue;
- }
-
- /* First find the supported CAS latencies
- * Byte 18 for DDR SDRAM is interpreted:
+ /* Byte 18 for DDR SDRAM is interpreted:
* bit 0 == CAS Latency = 1.0
* bit 1 == CAS Latency = 1.5
* bit 2 == CAS Latency = 2.0
* bit 3 == CAS Latency = 2.5
* bit 4 == CAS Latency = 3.0
* bit 5 == CAS Latency = 3.5
- * bit 6 == TBD
+ * bit 6 == CAS Latency = 4.0
* bit 7 == TBD
*/
- new_cycle_time = 0xa0;
- new_latency = 5;
-
- latencies = spd_read_byte(ctrl->channel0[i], 18);
- if (latencies <= 0) continue;
-
- /* Compute the lowest cas latency supported */
- latency = log2(latencies) -2;
-
- /* Loop through and find a fast clock with a low latency */
- for (index = 0; index < 3; index++, latency++) {
- int spd_value;
- if ((latency < 2) || (latency > 4) ||
- (!(latencies & (1 << latency)))) {
+ spd_cls = spd_read_byte(ctrl->channel0[dimm], 18);
+ if (spd_cls <= 0)
+ goto hw_error;
+ memset(cl_at_freq_mask, 0x00, sizeof(cl_at_freq_mask));
+ for (cl = 1 << log2(spd_cls), i = 0; i < 3; cl >>= 1, i++) {
+ if (!(spd_cls & cl))
continue;
- }
- spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
- if (spd_value < 0) {
+ spd_min_cycle_time = spd_read_byte(ctrl->channel0[dimm],
+ spd_min_cycle_time_indices[i]);
+ if (spd_min_cycle_time < 0)
goto hw_error;
- }
-
- /* Only increase the latency if we decreas the clock */
- if ((spd_value >= min_cycle_time) && (spd_value < new_cycle_time)) {
- new_cycle_time = spd_value;
- new_latency = latency;
- }
- }
- if (new_latency > 4){
- continue;
- }
- /* Does min_latency need to be increased? */
- if (new_cycle_time > min_cycle_time) {
- min_cycle_time = new_cycle_time;
- }
- /* Does min_cycle_time need to be increased? */
- if (new_latency > min_latency) {
- min_latency = new_latency;
+ if ((!spd_min_cycle_time) || (spd_min_cycle_time & 0x0f) > 9)
+ continue;
+ for (x = 0; x < sizeof(cl_at_freq_mask); x++)
+ if (cycle_time_at_freq[x] >= spd_min_cycle_time)
+ cl_at_freq_mask[x] |= cl;
}
+ for (x = 0; x < sizeof(cl_at_freq_mask); x++)
+ cl_at_freq[x] &= cl_at_freq_mask[x];
}
- /* Make a second pass through the dimms and disable
- * any that cannot support the selected memclk and cas latency.
- */
-
- for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
- int latencies;
- int latency;
- int index;
- int spd_value;
- if (!(dimm_mask & (1 << i))) {
- continue;
- }
-
- latencies = spd_read_byte(ctrl->channel0[i], 18);
- if (latencies < 0) goto hw_error;
- if (latencies == 0) {
- goto dimm_err;
- }
- /* Compute the lowest cas latency supported */
- latency = log2(latencies) -2;
+ freq = NBCAP_MEMCLK_200MHZ;
+ while (freq < sizeof(cl_at_freq) && !cl_at_freq[freq])
+ freq++;
- /* Walk through searching for the selected latency */
- for (index = 0; index < 3; index++, latency++) {
- if (!(latencies & (1 << latency))) {
- continue;
- }
- if (latency == min_latency)
- break;
- }
- /* If I can't find the latency or my index is bad error */
- if ((latency != min_latency) || (index >= 3)) {
- goto dimm_err;
- }
+ max_freq_dloading = spd_dimm_loading_socket(ctrl, dimm_mask, &max_freq_1t);
+ if (max_freq_dloading > freq) {
+ printk(BIOS_WARNING, "Memory speed reduced due to signal loading conditions\n");
+ freq = max_freq_dloading;
+ while (freq < sizeof(cl_at_freq) && !cl_at_freq[freq])
+ freq++;
+ }
- /* Read the min_cycle_time for this latency */
- spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
- if (spd_value < 0) goto hw_error;
+ /* if the next lower frequency gives a CL at least one whole cycle
+ * shorter, select that (see end of BKDG 4.1.1.1) */
+ if (freq < sizeof(cl_at_freq)-1 && cl_at_freq[freq+1] &&
+ log2f(cl_at_freq[freq]) - log2f(cl_at_freq[freq+1]) >= 2)
+ freq++;
- /* All is good if the selected clock speed
- * is what I need or slower.
- */
- if (spd_value <= min_cycle_time) {
- continue;
- }
- /* Otherwise I have an error, disable the dimm */
- dimm_err:
- dimm_mask = disable_dimm(ctrl, i, dimm_mask);
- }
-#if 0
-//down speed for full load 4 rank support
-#if QRANK_DIMM_SUPPORT
- if (dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
- int ranks = 4;
- for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
- int val;
- if (!(dimm_mask & (1 << i))) {
- continue;
- }
- val = spd_read_byte(ctrl->channel0[i], 5);
- if (val!=ranks) {
- ranks = val;
- break;
- }
- }
- if (ranks==4) {
- if (min_cycle_time <= 0x50 ) {
- min_cycle_time = 0x60;
- }
- }
+ if (freq == sizeof(cl_at_freq))
+ goto hw_error;
+#if CONFIG_CPU_AMD_SOCKET_754
+ if (freq < max_freq_1t || CONFIG_K8_FORCE_2T_DRAM_TIMING) {
+ pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW,
+ pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW) | DCL_En2T);
}
#endif
-#endif
- /* Now that I know the minimum cycle time lookup the memory parameters */
- result.param = get_mem_param(min_cycle_time);
+
+ result.param = get_mem_param(freq);
/* Update DRAM Config High with our selected memory speed */
value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
/* Update DRAM Timing Low with our selected cas latency */
value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
- value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
+ value |= latencies[log2f(cl_at_freq[freq]) - 2] << DTL_TCL_SHIFT;
pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
result.dimm_mask = dimm_mask;
{
uint32_t dcl;
int value;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
int rank;
#endif
int dimm;
return -1;
}
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
if (rank < 0) {
return -1;
#endif
dimm = 1<<(DCL_x4DIMM_SHIFT+i);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
if (rank==4) {
dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
}
#endif
{
int i;
+ u32 whatWait = 0;
+#if CONFIG_HAVE_ACPI_RESUME == 1
+ int suspend = acpi_is_wakeup_early();
+#else
+ int suspend = 0;
+#endif
/* Error if I don't have memory */
if (memory_end_k(ctrl, controllers) == 0) {
}
pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
}
- dcl |= DCL_DisDqsHys;
- pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
+
+ if (!suspend) {
+ dcl |= DCL_DisDqsHys;
+ pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
+ }
dcl &= ~DCL_DisDqsHys;
dcl &= ~DCL_DLL_Disable;
dcl &= ~DCL_D_DRV;
dcl &= ~DCL_QFC_EN;
- dcl |= DCL_DramInit;
+
+ if (suspend) {
+ enable_lapic();
+ init_timer();
+ dcl |= (DCL_ESR | DCL_SRS);
+ /* Handle errata 85 Insufficient Delay Between MEMCLK Startup
+ and CKE Assertion During Resume From S3 */
+ udelay(10); /* for unregistered */
+ if (is_registered(&ctrl[i])) {
+ udelay(100); /* 110us for registered (we wait 10us already) */
+ }
+ whatWait = DCL_ESR;
+ } else {
+ dcl |= DCL_DramInit;
+ whatWait = DCL_DramInit;
+ }
pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
}
if ((loops & 1023) == 0) {
printk(BIOS_DEBUG, ".");
}
- } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
+ } while(((dcl & whatWait) != 0) && (loops < TIMEOUT_LOOPS));
if (loops >= TIMEOUT_LOOPS) {
printk(BIOS_DEBUG, " failed\n");
continue;
if (!is_cpu_pre_c0()) {
/* Wait until it is safe to touch memory */
+#if 0
+ /* the registers are marked read-only but code zeros them */
dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
+#endif
do {
dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
- } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
+ } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) ||
+ ((dcl & DCL_SRS)));
}
printk(BIOS_DEBUG, " done\n");