Move C labels to start-of-line
[coreboot.git] / src / northbridge / amd / amdk8 / raminit.c
index 7ad1b8004c2c7b2b423012e34ad6843875518a78..9cb7c60004b7d1eddefab539af5c68b695c8ec5b 100644 (file)
 #include <reset.h>
 #include "raminit.h"
 #include "amdk8.h"
+#if CONFIG_HAVE_OPTION_TABLE
+#include "option_table.h"
+#endif
 
 #if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0
 # error "CONFIG_RAMTOP must be a power of 2"
 #endif
 
-#ifndef QRANK_DIMM_SUPPORT
-#define QRANK_DIMM_SUPPORT 0
-#endif
-
-static void setup_resource_map(const unsigned int *register_values, int max)
+void setup_resource_map(const unsigned int *register_values, int max)
 {
        int i;
 //     printk(BIOS_DEBUG, "setting up resource map....");
@@ -42,7 +41,7 @@ static int controller_present(const struct mem_controller *ctrl)
        return pci_read_config32(ctrl->f0, 0) == 0x11001022;
 }
 
-#if RAMINIT_SYSINFO==1
+#if CONFIG_RAMINIT_SYSINFO
 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
 #else
 static void sdram_set_registers(const struct mem_controller *ctrl)
@@ -549,8 +548,7 @@ static void hw_enable_ecc(const struct mem_controller *ctrl)
        if (nbcap & NBCAP_ECC) {
                dcl |= DCL_DimmEccEn;
        }
-       if (CONFIG_HAVE_OPTION_TABLE &&
-           read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
+       if (read_option(ECC_memory, 1) == 0) {
                dcl &= ~DCL_DimmEccEn;
        }
        pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
@@ -593,7 +591,7 @@ struct dimm_size {
        unsigned long side2;
        unsigned long rows;
        unsigned long col;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        unsigned long rank;
 #endif
 };
@@ -607,7 +605,7 @@ static struct dimm_size spd_get_dimm_size(unsigned device)
        sz.side2 = 0;
        sz.rows = 0;
        sz.col = 0;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        sz.rank = 0;
 #endif
 
@@ -651,7 +649,7 @@ static struct dimm_size spd_get_dimm_size(unsigned device)
        if ((value != 2) && (value != 4 )) {
                goto val_err;
        }
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        sz.rank = value;
 #endif
 
@@ -680,10 +678,10 @@ hw_err:
        sz.side2 = 0;
        sz.rows = 0;
        sz.col = 0;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        sz.rank = 0;
 #endif
- out:
+out:
        return sz;
 }
 
@@ -728,7 +726,7 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
        /* Set the appropriate DIMM base address register */
        pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
        pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        if (sz.rank == 4) {
                pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
                pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
@@ -739,7 +737,7 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
        if (base0) {
                dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
                dch |= DCH_MEMCLK_EN0 << index;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
                if (sz.rank == 4) {
                        dch |= DCH_MEMCLK_EN0 << (index + 2);
                }
@@ -761,7 +759,7 @@ static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz,
 
        map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
        map &= ~(0xf << (index * 4));
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        if (sz.rank == 4) {
                map &= ~(0xf << ( (index + 2) * 4));
        }
@@ -772,7 +770,7 @@ static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz,
        if (sz.side1 >= (25 +3)) {
                if (is_cpu_pre_d0()) {
                        map |= (sz.side1 - (25 + 3)) << (index *4);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
                        if (sz.rank == 4) {
                                map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
                        }
@@ -780,7 +778,7 @@ static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz,
                }
                else {
                        map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
                        if (sz.rank == 4) {
                                map |=  cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
                        }
@@ -1102,8 +1100,7 @@ static void order_dimms(const struct mem_controller *ctrl)
 {
        unsigned long tom_k, base_k;
 
-       if ((!CONFIG_HAVE_OPTION_TABLE) ||
-           read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
+       if (read_option(interleave_chip_selects, 1) != 0) {
                tom_k = interleave_chip_selects(ctrl);
        } else {
                printk(BIOS_DEBUG, "Interleaving disabled\n");
@@ -1167,14 +1164,13 @@ static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
        dcl &= ~DCL_UnBuffDimm;
        if (unbuffered) {
                if ((has_dualch) && (!is_cpu_pre_d0())) {
-                       dcl |= DCL_UnBuffDimm; /* set DCL_DualDIMMen too? */
-
-                       /* set DCL_En2T if you have non-equal DDR mem types! */
-
+                       dcl |= DCL_UnBuffDimm;
+#if CONFIG_CPU_AMD_SOCKET_939
                        if ((cpuid_eax(1) & 0x30) == 0x30) {
                                /* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
                                dcl |= DCL_UpperCSMap;
                        }
+#endif
                } else {
                        dcl |= DCL_UnBuffDimm;
                }
@@ -1234,8 +1230,8 @@ static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_ma
                17,     /* *Logical Banks */
                18,     /* *Supported CAS Latencies */
                21,     /* *SDRAM Module Attributes */
-               23,     /* *Cycle time at CAS Latnecy (CLX - 0.5) */
-               26,     /* *Cycle time at CAS Latnecy (CLX - 1.0) */
+               23,     /* *Cycle time at CAS Latency (CLX - 0.5) */
+               25,     /* *Cycle time at CAS Latency (CLX - 1.0) */
                27,     /* *tRP Row precharge time */
                28,     /* *Minimum Row Active to Row Active Delay (tRRD) */
                29,     /* *tRCD RAS to CAS */
@@ -1305,11 +1301,11 @@ struct mem_param {
        char name[9];
 };
 
-static const struct mem_param *get_mem_param(unsigned min_cycle_time)
+static const struct mem_param *get_mem_param(int freq)
 {
        static const struct mem_param speed[] = {
-               {
-                       .name       = "100Mhz",
+               [NBCAP_MEMCLK_100MHZ] = {
+                       .name       = "100MHz",
                        .cycle_time = 0xa0,
                        .divisor    = (10 <<1),
                        .tRC        = 0x46,
@@ -1322,8 +1318,8 @@ static const struct mem_param *get_mem_param(unsigned min_cycle_time)
                        .dtl_trwt   = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
                        .rdpreamble = { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
                },
-               {
-                       .name       = "133Mhz",
+               [NBCAP_MEMCLK_133MHZ] = {
+                       .name       = "133MHz",
                        .cycle_time = 0x75,
                        .divisor    = (7<<1)+1,
                        .tRC        = 0x41,
@@ -1336,8 +1332,8 @@ static const struct mem_param *get_mem_param(unsigned min_cycle_time)
                        .dtl_trwt   = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
                        .rdpreamble = { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
                },
-               {
-                       .name       = "166Mhz",
+               [NBCAP_MEMCLK_166MHZ] = {
+                       .name       = "166MHz",
                        .cycle_time = 0x60,
                        .divisor    = (6<<1),
                        .tRC        = 0x3C,
@@ -1350,8 +1346,8 @@ static const struct mem_param *get_mem_param(unsigned min_cycle_time)
                        .dtl_trwt   = { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
                        .rdpreamble = { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
                },
-               {
-                       .name       = "200Mhz",
+               [NBCAP_MEMCLK_200MHZ] = {
+                       .name       = "200MHz",
                        .cycle_time = 0x50,
                        .divisor    = (5<<1),
                        .tRC        = 0x37,
@@ -1363,20 +1359,11 @@ static const struct mem_param *get_mem_param(unsigned min_cycle_time)
                        .dtl_twtr   = 2,
                        .dtl_trwt   = { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
                        .rdpreamble = { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
-               },
-               {
-                       .cycle_time = 0x00,
-               },
+               }
        };
        const struct mem_param *param;
-       for (param = &speed[0]; param->cycle_time ; param++) {
-               if (min_cycle_time > (param+1)->cycle_time) {
-                       break;
-               }
-       }
-       if (!param->cycle_time) {
-               die("min_cycle_time to low");
-       }
+
+       param = speed + freq;
        printk(BIOS_SPEW, "%s\n", param->name);
        return param;
 }
@@ -1385,171 +1372,302 @@ struct spd_set_memclk_result {
        const struct mem_param *param;
        long dimm_mask;
 };
+
+static int spd_dimm_loading_socket(const struct mem_controller *ctrl, long dimm_mask, int *freq_1t)
+{
+
+#if CONFIG_CPU_AMD_SOCKET_939
+
+/* values are raised by +1 so we can detect 0 as a bad/empty field */
+#define DDR200 (NBCAP_MEMCLK_100MHZ + 1)
+#define DDR333 (NBCAP_MEMCLK_166MHZ + 1)
+#define DDR400 (NBCAP_MEMCLK_200MHZ + 1)
+#define DDR_2T 0x80
+#define DDR_MASK 0x7
+
+#define DDR200_2T (DDR_2T | DDR200)
+#define DDR333_2T (DDR_2T | DDR333)
+#define DDR400_2T (DDR_2T | DDR400)
+
+/*
+       Following table comes directly from BKDG (unbuffered DIMM support)
+       [Y][X] Y = ch0_0, ch1_0, ch0_1, ch1_1 1=present 0=empty
+         X uses same layout but 1 means double rank 0 is single rank/empty
+
+       Following tables come from BKDG the ch{0_0,1_0,0_1,1_1} maps to
+       MEMCS_{1L,1H,2L,2H} in the PDF. PreE is table 45, and revE table 46.
+*/
+
+       static const unsigned char dimm_loading_config_preE[16][16] = {
+               [0x8] = {[0x0] = DDR400,[0x8] = DDR400},
+               [0x2] = {[0x0] = DDR333,[0x2] = DDR400},
+               [0xa] = {[0x0] = DDR400_2T,[0x2] = DDR400_2T,
+                        [0x8] = DDR400_2T,[0xa] = DDR333_2T},
+               [0xc] = {[0x0] = DDR400,[0xc] = DDR400},
+               [0x3] = {[0x0] = DDR333,[0x3] = DDR400},
+               [0xf] = {[0x0] = DDR400_2T,[0x3] = DDR400_2T,
+                        [0xc] = DDR400_2T,[0xf] = DDR333_2T},
+       };
+
+       static const unsigned char dimm_loading_config_revE[16][16] = {
+               [0x8] = {[0x0] = DDR400, [0x8] = DDR400},
+               [0x2] = {[0x0] = DDR333, [0x2] = DDR400},
+               [0x4] = {[0x0] = DDR400, [0x4] = DDR400},
+               [0x1] = {[0x0] = DDR333, [0x1] = DDR400},
+               [0xa] = {[0x0] = DDR400_2T, [0x2] = DDR400_2T,
+                        [0x8] = DDR400_2T, [0xa] = DDR333_2T},
+               [0x5] = {[0x0] = DDR400_2T, [0x1] = DDR400_2T,
+                        [0x4] = DDR400_2T, [0x5] = DDR333_2T},
+               [0xc] = {[0x0] = DDR400, [0xc] = DDR400, [0x4] = DDR400, [0x8] = DDR400},
+               [0x3] = {[0x0] = DDR333, [0x1] = DDR333, [0x2] = DDR333, [0x3] = DDR400},
+               [0xe] = {[0x0] = DDR400_2T, [0x4] = DDR400_2T, [0x2] = DDR400_2T,
+                        [0x6] = DDR400_2T, [0x8] = DDR400_2T, [0xc] = DDR400_2T,
+                        [0xa] = DDR333_2T, [0xe] = DDR333_2T},
+               [0xb] = {[0x0] = DDR333, [0x1] = DDR400_2T, [0x2] = DDR333_2T,
+                        [0x3] = DDR400_2T, [0x8] = DDR333_2T, [0x9] = DDR400_2T,
+                        [0xa] = DDR333_2T, [0xb] = DDR333_2T},
+               [0xd] = {[0x0] = DDR400_2T, [0x8] = DDR400_2T, [0x1] = DDR400_2T,
+                        [0x9] = DDR333_2T, [0x4] = DDR400_2T, [0xc] = DDR400_2T,
+                        [0x5] = DDR333_2T, [0xd] = DDR333_2T},
+               [0x7] = {[0x0] = DDR333,    [0x2] = DDR400_2T, [0x1] = DDR333_2T,
+                        [0x3] = DDR400_2T, [0x4] = DDR333_2T, [0x6] = DDR400_2T,
+                        [0x5] = DDR333_2T, [0x7] = DDR333_2T},
+               [0xf] = {[0x0] = DDR400_2T, [0x1] = DDR400_2T, [0x4] = DDR400_2T,
+                        [0x5] = DDR333_2T, [0x2] = DDR400_2T, [0x3] = DDR400_2T,
+                        [0x6] = DDR400_2T, [0x7] = DDR333_2T, [0x8] = DDR400_2T,
+                        [0x9] = DDR400_2T, [0xc] = DDR400_2T, [0xd] = DDR333_2T,
+                        [0xa] = DDR333_2T, [0xb] = DDR333_2T, [0xe] = DDR333_2T,
+                        [0xf] = DDR333_2T},
+       };
+       /* The dpos matches the channel positions defined in the BKDG and the above arrays.
+          The rpos is a bitmask of dual-rank DIMMs in the same order as dpos. */
+       unsigned int dloading = 0, i, rpos = 0, dpos = 0;
+       const unsigned char (*dimm_loading_config)[16] = dimm_loading_config_revE;
+       int rank;
+       uint32_t dcl;
+
+       if (is_cpu_pre_e0()) {
+               dimm_loading_config = dimm_loading_config_preE;
+       }
+
+       /* only two DIMMs per channel */
+       for (i = 0; i < 2; i++) {
+               if ((dimm_mask & (1 << i))) {
+                       /* read rank channel 0 */
+                       rank = spd_read_byte(ctrl->channel0[i], 5);
+                       if (rank < 0) goto hw_error;
+                       rpos |= (rank == 2) ? (1 << (3 - (i * 2))) : 0;
+                       dpos |= (1 << (3 - (i * 2)));
+               }
+
+               if ((dimm_mask & (1 << (i+DIMM_SOCKETS)))) {
+                       /* read rank channel 1*/
+                       rank = spd_read_byte(ctrl->channel1[i], 5);
+                       if (rank < 0) goto hw_error;
+                       rpos |= (rank == 2) ? (1 << (2 - (i * 2))) : 0;
+                       dpos |= (1 << (2 - (i * 2)));
+               }
+       }
+       /* now the lookup, decode the max speed DDR400_2T etc */
+       dloading = dimm_loading_config[dpos][rpos] & DDR_MASK;
+#if 0
+       printk(BIOS_DEBUG, "XXX %x %x dload %x 2T %x\n", dpos,rpos, dloading, dimm_loading_config[dpos][rpos] & DDR_2T);
+#endif
+hw_error:
+       if (dloading != 0) {
+               /* we have valid combination check the restrictions */
+               dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
+               dcl |= ((dimm_loading_config[dpos][rpos] & DDR_2T) || CONFIG_K8_FORCE_2T_DRAM_TIMING) ? (DCL_En2T) : 0;
+               /* Set DualDIMMen if the second channel is completely empty (revD+) */
+               if (((cpuid_eax(1) & 0xfff0f) >= 0x10f00) && ((dpos & 0x5) == 0)) {
+                       printk(BIOS_DEBUG, "Setting DualDIMMen\n");
+                       dcl |= DCL_DualDIMMen;
+               }
+               pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
+               return dloading - 1;
+       } else {
+               /* if we don't find it we set it to DDR400 */
+               printk(BIOS_WARNING, "Detected strange DIMM configuration, may not work! (or bug)\n");
+               return NBCAP_MEMCLK_200MHZ;
+       }
+
+#elif CONFIG_CPU_AMD_SOCKET_754
+
+#define CFGIDX(DIMM1,DIMM2,DIMM3) ((DIMM3)*9+(DIMM2)*3+(DIMM1))
+
+#define EMPTY 0
+#define X8S_X16 1
+#define X8D 2
+
+#define DDR200 NBCAP_MEMCLK_100MHZ
+#define DDR333 NBCAP_MEMCLK_166MHZ
+#define DDR400 NBCAP_MEMCLK_200MHZ
+
+       /* this is table 42 from the BKDG, ignoring footnote 4,
+        * with the EMPTY, EMPTY, EMPTY row added */
+       static const unsigned char cfgtable[][2] = {
+               [CFGIDX(EMPTY,          EMPTY,          EMPTY   )] = { DDR400, DDR400 },
+               [CFGIDX(X8S_X16,        EMPTY,          EMPTY   )] = { DDR400, DDR400 },
+               [CFGIDX(EMPTY,          X8S_X16,        EMPTY   )] = { DDR400, DDR400 },
+               [CFGIDX(EMPTY,          EMPTY,          X8S_X16 )] = { DDR400, DDR400 },
+               [CFGIDX(X8D,            EMPTY,          EMPTY   )] = { DDR400, DDR400 },
+               [CFGIDX(EMPTY,          X8D,            EMPTY   )] = { DDR400, DDR400 },
+               [CFGIDX(EMPTY,          EMPTY,          X8D     )] = { DDR400, DDR400 },
+               [CFGIDX(X8S_X16,        X8S_X16,        EMPTY   )] = { DDR400, DDR400 },
+               [CFGIDX(X8S_X16,        X8D,            EMPTY   )] = { DDR400, DDR400 },
+               [CFGIDX(X8S_X16,        EMPTY,          X8S_X16 )] = { DDR400, DDR400 },
+               [CFGIDX(X8S_X16,        EMPTY,          X8D     )] = { DDR400, DDR400 },
+               [CFGIDX(X8D,            X8S_X16,        EMPTY   )] = { DDR400, DDR400 },
+               [CFGIDX(X8D,            X8D,            EMPTY   )] = { DDR333, DDR333 },
+               [CFGIDX(X8D,            EMPTY,          X8S_X16 )] = { DDR400, DDR400 },
+               [CFGIDX(X8D,            EMPTY,          X8D     )] = { DDR333, DDR333 },
+               [CFGIDX(EMPTY,          X8S_X16,        X8S_X16 )] = { DDR333, DDR400 },
+               [CFGIDX(EMPTY,          X8S_X16,        X8D     )] = { DDR200, DDR400 },
+               [CFGIDX(EMPTY,          X8D,            X8S_X16 )] = { DDR200, DDR400 },
+               [CFGIDX(EMPTY,          X8D,            X8D     )] = { DDR200, DDR333 },
+               [CFGIDX(X8S_X16,        X8S_X16,        X8S_X16 )] = { DDR333, DDR400 },
+               [CFGIDX(X8S_X16,        X8S_X16,        X8D     )] = { DDR200, DDR333 },
+               [CFGIDX(X8S_X16,        X8D,            X8S_X16 )] = { DDR200, DDR333 },
+               [CFGIDX(X8S_X16,        X8D,            X8D     )] = { DDR200, DDR333 },
+               [CFGIDX(X8D,            X8S_X16,        X8S_X16 )] = { DDR333, DDR333 },
+               [CFGIDX(X8D,            X8S_X16,        X8D     )] = { DDR200, DDR333 },
+               [CFGIDX(X8D,            X8D,            X8S_X16 )] = { DDR200, DDR333 },
+               [CFGIDX(X8D,            X8D,            X8D     )] = { DDR200, DDR333 }
+       };
+
+       int i, rank, width, dimmtypes[3];
+       const unsigned char *cfg;
+
+       for (i = 0; i < 3; i++) {
+               if (dimm_mask & (1 << i)) {
+                       rank = spd_read_byte(ctrl->channel0[i], 5);
+                       width = spd_read_byte(ctrl->channel0[i], 13);
+                       if (rank < 0 || width < 0) die("failed to read SPD");
+                       width &= 0x7f;
+                       /* this is my guess as to how the criteria in the table
+                        * are to be understood:
+                        */
+                       dimmtypes[i] = width >= (rank == 1 ? 8 : 16) ? X8S_X16 : X8D;
+               } else {
+                       dimmtypes[i] = EMPTY;
+               }
+       }
+       cfg = cfgtable[CFGIDX(dimmtypes[0], dimmtypes[1], dimmtypes[2])];
+       *freq_1t = cfg[0];
+       return is_cpu_c0() ? cfg[0] : cfg[1];
+
+#else /* CONFIG_CPU_AMD_SOCKET_* */
+
+/* well, there are socket 940 boards supported which obviously fail to
+ * compile with this */
+//     #error load dependent memory clock limiting is not implemented for this socket
+
+       /* see BKDG 4.1.3--if you just want to test a setup that doesn't
+        * require limiting, you may use the following code */
+
+       *freq_1t = NBCAP_MEMCLK_200MHZ;
+       return NBCAP_MEMCLK_200MHZ;
+
+#endif /* CONFIG_CPU_AMD_SOCKET_* */
+
+}
+
 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
 {
-       /* Compute the minimum cycle time for these dimms */
        struct spd_set_memclk_result result;
-       unsigned min_cycle_time, min_latency, bios_cycle_time;
-       int i;
+       unsigned char cl_at_freq[NBCAP_MEMCLK_MASK + 1];
+       int dimm, freq, max_freq_bios, max_freq_dloading, max_freq_1t;
        uint32_t value;
 
-       static const uint8_t latency_indicies[] = { 26, 23, 9 };
-       static const unsigned char min_cycle_times[] = {
+       static const uint8_t spd_min_cycle_time_indices[] = { 9, 23, 25 };
+       static const unsigned char cycle_time_at_freq[] = {
                [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
                [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
                [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
                [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
        };
 
-       value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
-
-       min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
-       bios_cycle_time = min_cycle_times[
-               read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
-       if (CONFIG_HAVE_OPTION_TABLE && bios_cycle_time > min_cycle_time) {
-               min_cycle_time = bios_cycle_time;
-       }
-       min_latency = 2;
-
-       /* Compute the least latency with the fastest clock supported
-        * by both the memory controller and the dimms.
+       /* BEWARE that the constants for frequencies order in reverse of what
+        * would be intuitive. 200 MHz has the lowest constant, 100 MHz the
+        * highest. Thus, all comparisons and traversal directions having to
+        * do with frequencies are/have to be the opposite of what would be
+        * intuitive.
         */
-       for (i = 0; i < DIMM_SOCKETS; i++) {
-               int new_cycle_time, new_latency;
-               int index;
-               int latencies;
-               int latency;
 
-               if (!(dimm_mask & (1 << i))) {
+       /* the CLs supported by the controller: */
+       memset(cl_at_freq, 0x1c, sizeof(cl_at_freq));
+       memset(cl_at_freq, 0x00,
+               (pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP) >>
+                NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK);
+       max_freq_bios = read_option(max_mem_clock, 0);
+       if (max_freq_bios <= NBCAP_MEMCLK_100MHZ)
+               memset(cl_at_freq, 0x00, max_freq_bios);
+       for (dimm = 0; dimm < DIMM_SOCKETS; dimm++) {
+               int x,i,spd_cls,cl,spd_min_cycle_time;
+               unsigned char cl_at_freq_mask[sizeof(cl_at_freq)];
+
+               if (!(dimm_mask & (1 << dimm)))
                        continue;
-               }
-
-               /* First find the supported CAS latencies
-                * Byte 18 for DDR SDRAM is interpreted:
+               /* Byte 18 for DDR SDRAM is interpreted:
                 * bit 0 == CAS Latency = 1.0
                 * bit 1 == CAS Latency = 1.5
                 * bit 2 == CAS Latency = 2.0
                 * bit 3 == CAS Latency = 2.5
                 * bit 4 == CAS Latency = 3.0
                 * bit 5 == CAS Latency = 3.5
-                * bit 6 == TBD
+                * bit 6 == CAS Latency = 4.0
                 * bit 7 == TBD
                 */
-               new_cycle_time = 0xa0;
-               new_latency = 5;
-
-               latencies = spd_read_byte(ctrl->channel0[i], 18);
-               if (latencies <= 0) continue;
-
-               /* Compute the lowest cas latency supported */
-               latency = log2(latencies) -2;
-
-               /* Loop through and find a fast clock with a low latency */
-               for (index = 0; index < 3; index++, latency++) {
-                       int spd_value;
-                       if ((latency < 2) || (latency > 4) ||
-                               (!(latencies & (1 << latency)))) {
+               spd_cls = spd_read_byte(ctrl->channel0[dimm], 18);
+               if (spd_cls <= 0)
+                       goto hw_error;
+               memset(cl_at_freq_mask, 0x00, sizeof(cl_at_freq_mask));
+               for (cl = 1 << log2(spd_cls), i = 0; i < 3; cl >>= 1, i++) {
+                       if (!(spd_cls & cl))
                                continue;
-                       }
-                       spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
-                       if (spd_value < 0) {
+                       spd_min_cycle_time = spd_read_byte(ctrl->channel0[dimm],
+                                       spd_min_cycle_time_indices[i]);
+                       if (spd_min_cycle_time < 0)
                                goto hw_error;
-                       }
-
-                       /* Only increase the latency if we decreas the clock */
-                       if ((spd_value >= min_cycle_time) && (spd_value < new_cycle_time)) {
-                               new_cycle_time = spd_value;
-                               new_latency = latency;
-                       }
-               }
-               if (new_latency > 4){
-                       continue;
-               }
-               /* Does min_latency need to be increased? */
-               if (new_cycle_time > min_cycle_time) {
-                       min_cycle_time = new_cycle_time;
-               }
-               /* Does min_cycle_time need to be increased? */
-               if (new_latency > min_latency) {
-                       min_latency = new_latency;
+                       if ((!spd_min_cycle_time) || (spd_min_cycle_time & 0x0f) > 9)
+                               continue;
+                       for (x = 0; x < sizeof(cl_at_freq_mask); x++)
+                               if (cycle_time_at_freq[x] >= spd_min_cycle_time)
+                                       cl_at_freq_mask[x] |= cl;
                }
+               for (x = 0; x < sizeof(cl_at_freq_mask); x++)
+                       cl_at_freq[x] &= cl_at_freq_mask[x];
        }
-       /* Make a second pass through the dimms and disable
-        * any that cannot support the selected memclk and cas latency.
-        */
-
-       for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
-               int latencies;
-               int latency;
-               int index;
-               int spd_value;
-               if (!(dimm_mask & (1 << i))) {
-                       continue;
-               }
-
-               latencies = spd_read_byte(ctrl->channel0[i], 18);
-               if (latencies < 0) goto hw_error;
-               if (latencies == 0) {
-                       goto dimm_err;
-               }
 
-               /* Compute the lowest cas latency supported */
-               latency = log2(latencies) -2;
+       freq = NBCAP_MEMCLK_200MHZ;
+       while (freq < sizeof(cl_at_freq) && !cl_at_freq[freq])
+               freq++;
 
-               /* Walk through searching for the selected latency */
-               for (index = 0; index < 3; index++, latency++) {
-                       if (!(latencies & (1 << latency))) {
-                               continue;
-                       }
-                       if (latency == min_latency)
-                               break;
-               }
-               /* If I can't find the latency or my index is bad error */
-               if ((latency != min_latency) || (index >= 3)) {
-                       goto dimm_err;
-               }
+       max_freq_dloading = spd_dimm_loading_socket(ctrl, dimm_mask, &max_freq_1t);
+       if (max_freq_dloading > freq) {
+               printk(BIOS_WARNING, "Memory speed reduced due to signal loading conditions\n");
+               freq = max_freq_dloading;
+               while (freq < sizeof(cl_at_freq) && !cl_at_freq[freq])
+                       freq++;
+       }
 
-               /* Read the min_cycle_time for this latency */
-               spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
-               if (spd_value < 0) goto hw_error;
+       /* if the next lower frequency gives a CL at least one whole cycle
+        * shorter, select that (see end of BKDG 4.1.1.1) */
+       if (freq < sizeof(cl_at_freq)-1 && cl_at_freq[freq+1] &&
+               log2f(cl_at_freq[freq]) - log2f(cl_at_freq[freq+1]) >= 2)
+                       freq++;
 
-               /* All is good if the selected clock speed
-                * is what I need or slower.
-                */
-               if (spd_value <= min_cycle_time) {
-                       continue;
-               }
-               /* Otherwise I have an error, disable the dimm */
-       dimm_err:
-               dimm_mask = disable_dimm(ctrl, i, dimm_mask);
-       }
-#if 0
-//down speed for full load 4 rank support
-#if QRANK_DIMM_SUPPORT
-       if (dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
-               int ranks = 4;
-               for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
-                       int val;
-                       if (!(dimm_mask & (1 << i))) {
-                               continue;
-                       }
-                       val = spd_read_byte(ctrl->channel0[i], 5);
-                       if (val!=ranks) {
-                               ranks = val;
-                               break;
-                       }
-               }
-               if (ranks==4) {
-                       if (min_cycle_time <= 0x50 ) {
-                               min_cycle_time = 0x60;
-                       }
-               }
+       if (freq == sizeof(cl_at_freq))
+               goto hw_error;
 
+#if CONFIG_CPU_AMD_SOCKET_754
+       if (freq < max_freq_1t || CONFIG_K8_FORCE_2T_DRAM_TIMING) {
+               pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW,
+                       pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW) | DCL_En2T);
        }
 #endif
-#endif
-       /* Now that I know the minimum cycle time lookup the memory parameters */
-       result.param = get_mem_param(min_cycle_time);
+
+       result.param = get_mem_param(freq);
 
        /* Update DRAM Config High with our selected memory speed */
        value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
@@ -1571,7 +1689,7 @@ static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *
        /* Update DRAM Timing Low with our selected cas latency */
        value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
        value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
-       value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
+       value |= latencies[log2f(cl_at_freq[freq]) - 2] << DTL_TCL_SHIFT;
        pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
 
        result.dimm_mask = dimm_mask;
@@ -1792,7 +1910,7 @@ static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_pa
 {
        uint32_t dcl;
        int value;
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        int rank;
 #endif
        int dimm;
@@ -1801,7 +1919,7 @@ static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_pa
                return -1;
        }
 
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        rank = spd_read_byte(ctrl->channel0[i], 5);     /* number of physical banks */
        if (rank < 0) {
                return -1;
@@ -1809,7 +1927,7 @@ static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_pa
 #endif
 
        dimm = 1<<(DCL_x4DIMM_SHIFT+i);
-#if QRANK_DIMM_SUPPORT == 1
+#if CONFIG_QRANK_DIMM_SUPPORT
        if (rank==4) {
                dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
        }
@@ -2061,7 +2179,7 @@ static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct
        return dimm_mask;
 }
 
-#if RAMINIT_SYSINFO==1
+#if CONFIG_RAMINIT_SYSINFO
 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
 #else
 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
@@ -2222,13 +2340,19 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
 #endif
 
 #define TIMEOUT_LOOPS 300000
-#if RAMINIT_SYSINFO == 1
+#if CONFIG_RAMINIT_SYSINFO
 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
 #else
 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
 #endif
 {
        int i;
+       u32 whatWait = 0;
+#if CONFIG_HAVE_ACPI_RESUME == 1
+       int suspend = acpi_is_wakeup_early();
+#else
+       int suspend = 0;
+#endif
 
        /* Error if I don't have memory */
        if (memory_end_k(ctrl, controllers) == 0) {
@@ -2280,13 +2404,31 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
                        }
                        pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
                }
-               dcl |= DCL_DisDqsHys;
-               pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
+
+               if (!suspend) {
+                       dcl |= DCL_DisDqsHys;
+                       pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
+               }
                dcl &= ~DCL_DisDqsHys;
                dcl &= ~DCL_DLL_Disable;
                dcl &= ~DCL_D_DRV;
                dcl &= ~DCL_QFC_EN;
-               dcl |= DCL_DramInit;
+
+               if (suspend) {
+                       enable_lapic();
+                       init_timer();
+                       dcl |= (DCL_ESR | DCL_SRS);
+                       /* Handle errata 85 Insufficient Delay Between MEMCLK Startup
+                          and CKE Assertion During Resume From S3 */
+                       udelay(10); /* for unregistered */
+                       if (is_registered(&ctrl[i])) {
+                               udelay(100); /* 110us for registered (we wait 10us already) */
+                       }
+                       whatWait = DCL_ESR;
+               } else {
+                       dcl |= DCL_DramInit;
+                       whatWait = DCL_DramInit;
+               }
                pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
        }
 
@@ -2308,7 +2450,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
                        if ((loops & 1023) == 0) {
                                printk(BIOS_DEBUG, ".");
                        }
-               } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
+               } while(((dcl & whatWait) != 0) && (loops < TIMEOUT_LOOPS));
                if (loops >= TIMEOUT_LOOPS) {
                        printk(BIOS_DEBUG, " failed\n");
                        continue;
@@ -2316,11 +2458,15 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
 
                if (!is_cpu_pre_c0()) {
                        /* Wait until it is safe to touch memory */
+#if 0
+                       /* the registers are marked read-only but code zeros them */
                        dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
                        pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
+#endif
                        do {
                                dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
-                       } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
+                       } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) ||
+                                       ((dcl & DCL_SRS)));
                }
 
                printk(BIOS_DEBUG, " done\n");
@@ -2348,7 +2494,7 @@ static void set_sysinfo_in_ram(unsigned val)
 {
 }
 
-static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
+void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
                          const uint16_t *spd_addr)
 {
        int i;