1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
7 #include <cpu/x86/mem.h>
8 #include <cpu/x86/cache.h>
9 #include <cpu/x86/mtrr.h>
13 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
14 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
17 #ifndef K8_4RANK_DIMM_SUPPORT
18 #define K8_4RANK_DIMM_SUPPORT 0
21 #if defined (__GNUC__)
22 static void hard_reset(void);
/*
 * Program a table of (PCI config address, keep-mask, set-bits) triples:
 * for each triple, read the dword, AND with the keep-mask, OR in the
 * set-bits, and write it back.
 * NOTE(review): this view is fragmentary -- local declarations and the
 * #if/#endif scaffolding around the debug prints fall outside the
 * sampled lines.
 */
26 static void setup_resource_map(const unsigned int *register_values, int max)
29 // print_debug("setting up resource map....");
/* Step by 3: register_values[] holds addr / keep-mask / set-bits triples. */
33 for(i = 0; i < max; i += 3) {
/* NOTE(review): "prink_debug" is a typo (print_debug/printk_debug?); it
 * appears to sit in disabled debug code -- fix before ever enabling it. */
39 prink_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
41 print_debug_hex32(register_values[i]);
43 print_debug_hex32(register_values[i+2]);
/* The encoded address: upper bits select the PCI device, the low 12 bits
 * are the config-space offset. */
47 dev = register_values[i] & ~0xfff;
48 where = register_values[i] & 0xfff;
49 reg = pci_read_config32(dev, where);
50 reg &= register_values[i+1];
51 reg |= register_values[i+2];
52 pci_write_config32(dev, where, reg);
/* Alternate read-modify-write variant (presumably the other arm of a
 * hidden #if); this one only ORs in set-bits that lie outside the mask. */
54 reg = pci_read_config32(register_values[i]);
55 reg &= register_values[i+1];
56 reg |= register_values[i+2] & ~register_values[i+1];
57 pci_write_config32(register_values[i], reg);
60 // print_debug("done.\r\n");
64 static int controller_present(const struct mem_controller *ctrl)
66 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
/*
 * Load the static northbridge DRAM register table into this node's
 * memory controller.  register_values[] is (PCI address, keep-mask,
 * set-bits) triples; limit registers are deliberately listed before
 * base registers because the base registers carry the enable bits.
 * NOTE(review): fragmentary view -- declarations, #if/#endif debug
 * scaffolding and some loop braces fall outside the sampled lines.
 */
69 static void sdram_set_registers(const struct mem_controller *ctrl)
71 static const unsigned int register_values[] = {
73 /* Careful set limit registers before base registers which contain the enables */
74 /* DRAM Limit i Registers
83 * [ 2: 0] Destination Node ID
93 * [10: 8] Interleave select
94 * specifies the values of A[14:12] to use with interleave enable.
96 * [31:16] DRAM Limit Address i Bits 39-24
97 * This field defines the upper address bits of a 40 bit address
98 * that define the end of the DRAM region.
100 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
101 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
102 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
103 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
104 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
105 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
106 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
107 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
108 /* DRAM Base i Registers
117 * [ 0: 0] Read Enable
120 * [ 1: 1] Write Enable
121 * 0 = Writes Disabled
124 * [10: 8] Interleave Enable
125 * 000 = No interleave
126 * 001 = Interleave on A[12] (2 nodes)
128 * 011 = Interleave on A[12] and A[14] (4 nodes)
132 * 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
134 * [31:16] DRAM Base Address i Bits 39-24
135 * This field defines the upper address bits of a 40-bit address
136 * that define the start of the DRAM region.
138 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
139 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
140 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
141 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
142 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
143 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
144 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
145 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
147 /* DRAM CS Base Address i Registers
156 * [ 0: 0] Chip-Select Bank Enable
160 * [15: 9] Base Address (19-13)
161 * An optimization used when all DIMM are the same size...
163 * [31:21] Base Address (35-25)
164 * This field defines the top 11 address bits of a 40-bit
165 * address that define the memory address space. These
166 * bits decode 32-MByte blocks of memory.
168 PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
169 PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
170 PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
171 PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
172 PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
173 PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
174 PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
175 PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
176 /* DRAM CS Mask Address i Registers
185 * Select bits to exclude from comparison with the DRAM Base address register.
187 * [15: 9] Address Mask (19-13)
188 * Address to be excluded from the optimized case
190 * [29:21] Address Mask (33-25)
191 * The bits with an address mask of 1 are excluded from address comparison
195 PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
196 PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
197 PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
198 PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
199 PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
200 PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
201 PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
202 PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
203 /* DRAM Bank Address Mapping Register
205 * Specify the memory module size
210 * 000 = 32Mbyte (Rows = 12 & Col = 8)
211 * 001 = 64Mbyte (Rows = 12 & Col = 9)
212 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
213 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
214 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
215 * 101 = 1Gbyte (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
216 * 110 = 2Gbyte (Rows = 14 & Col = 12)
223 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
224 /* DRAM Timing Low Register
226 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
236 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
237 * 0000 = 7 bus clocks
238 * 0001 = 8 bus clocks
240 * 1110 = 21 bus clocks
241 * 1111 = 22 bus clocks
242 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
243 * 0000 = 9 bus clocks
244 * 0001 = 10 bus clocks
246 * 1110 = 23 bus clocks
247 * 1111 = 24 bus clocks
248 * [14:12] Trcd (Ras#-active to Cas#-read/write Delay)
258 * [18:16] Trrd (Ras# to Ras# Delay)
268 * [23:20] Tras (Minimum Ras# Active Time)
269 * 0000 to 0100 = reserved
270 * 0101 = 5 bus clocks
272 * 1111 = 15 bus clocks
273 * [26:24] Trp (Row Precharge Time)
283 * [28:28] Twr (Write Recovery Time)
288 PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
289 /* DRAM Timing High Register
291 * [ 0: 0] Twtr (Write to Read Delay)
295 * [ 6: 4] Trwt (Read to Write Delay)
305 * [12: 8] Tref (Refresh Rate)
306 * 00000 = 100Mhz 4K rows
307 * 00001 = 133Mhz 4K rows
308 * 00010 = 166Mhz 4K rows
309 * 00011 = 200Mhz 4K rows
310 * 01000 = 100Mhz 8K/16K rows
311 * 01001 = 133Mhz 8K/16K rows
312 * 01010 = 166Mhz 8K/16K rows
313 * 01011 = 200Mhz 8K/16K rows
315 * [22:20] Twcl (Write CAS Latency)
316 * 000 = 1 Mem clock after CAS# (Unbuffered Dimms)
317 * 001 = 2 Mem clocks after CAS# (Registered Dimms)
320 PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
321 /* DRAM Config Low Register
323 * [ 0: 0] DLL Disable
332 * [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
333 * 0 = Enable DQS input filter
334 * 1 = Disable DQS input filtering
337 * 0 = Initialization done or not yet started.
338 * 1 = Initiate DRAM initialization sequence
339 * [ 9: 9] SO-Dimm Enable
341 * 1 = SO-Dimms present
343 * 0 = DRAM not enabled
344 * 1 = DRAM initialized and enabled
345 * [11:11] Memory Clear Status
346 * 0 = Memory Clear function has not completed
347 * 1 = Memory Clear function has completed
348 * [12:12] Exit Self-Refresh
349 * 0 = Exit from self-refresh done or not yet started
350 * 1 = DRAM exiting from self refresh
351 * [13:13] Self-Refresh Status
352 * 0 = Normal Operation
353 * 1 = Self-refresh mode active
354 * [15:14] Read/Write Queue Bypass Count
359 * [16:16] 128-bit/64-Bit
360 * 0 = 64bit Interface to DRAM
361 * 1 = 128bit Interface to DRAM
362 * [17:17] DIMM ECC Enable
363 * 0 = Some DIMMs do not have ECC
364 * 1 = ALL DIMMS have ECC bits
365 * [18:18] UnBuffered DIMMs
367 * 1 = Unbuffered DIMMS
368 * [19:19] Enable 32-Byte Granularity
369 * 0 = Optimize for 64byte bursts
370 * 1 = Optimize for 32byte bursts
371 * [20:20] DIMM 0 is x4
372 * [21:21] DIMM 1 is x4
373 * [22:22] DIMM 2 is x4
374 * [23:23] DIMM 3 is x4
376 * 1 = x4 DIMM present
377 * [24:24] Disable DRAM Receivers
378 * 0 = Receivers enabled
379 * 1 = Receivers disabled
381 * 000 = Arbiter's choice is always respected
382 * 001 = Oldest entry in DCQ can be bypassed 1 time
383 * 010 = Oldest entry in DCQ can be bypassed 2 times
384 * 011 = Oldest entry in DCQ can be bypassed 3 times
385 * 100 = Oldest entry in DCQ can be bypassed 4 times
386 * 101 = Oldest entry in DCQ can be bypassed 5 times
387 * 110 = Oldest entry in DCQ can be bypassed 6 times
388 * 111 = Oldest entry in DCQ can be bypassed 7 times
391 PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
393 (0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
394 (1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
395 (2 << 14)|(0 << 13)|(0 << 12)|
396 (0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
397 (0 << 3) |(0 << 1) |(0 << 0),
398 /* DRAM Config High Register
400 * [ 0: 3] Maximum Asynchronous Latency
405 * [11: 8] Read Preamble
423 * [18:16] Idle Cycle Limit
432 * [19:19] Dynamic Idle Cycle Center Enable
433 * 0 = Use Idle Cycle Limit
434 * 1 = Generate a dynamic Idle cycle limit
435 * [22:20] DRAM MEMCLK Frequency
445 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
446 * 0 = Disable MemClks
448 * [26:26] Memory Clock 0 Enable
451 * [27:27] Memory Clock 1 Enable
454 * [28:28] Memory Clock 2 Enable
457 * [29:29] Memory Clock 3 Enable
462 PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
463 (0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
464 (0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
465 /* DRAM Delay Line Register
467 * Adjust the skew of the input DQS strobe relative to DATA
469 * [23:16] Delay Line Adjust
470 * Adjusts the DLL derived PDL delay by one or more delay stages
471 * in either the faster or slower direction.
472 * [24:24] Adjust Slower
474 * 1 = Adj is used to increase the PDL delay
475 * [25:25] Adjust Faster
477 * 1 = Adj is used to decrease the PDL delay
480 PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
481 /* DRAM Scrub Control Register
483 * [ 4: 0] DRAM Scrub Rate
485 * [12: 8] L2 Scrub Rate
487 * [20:16] Dcache Scrub
490 * 00000 = Do not scrub
512 * All Others = Reserved
514 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
515 /* DRAM Scrub Address Low Register
517 * [ 0: 0] DRAM Scrubber Redirect Enable
519 * 1 = Scrubber Corrects errors found in normal operation
521 * [31: 6] DRAM Scrub Address 31-6
523 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
524 /* DRAM Scrub Address High Register
526 * [ 7: 0] DRAM Scrub Address 39-32
529 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
/* Bail out quietly when this node has no memory controller. */
535 if (!controller_present(ctrl)) {
536 // print_debug("No memory controller present\r\n");
540 print_spew("setting up CPU");
541 print_spew_hex8(ctrl->node_id);
542 print_spew(" northbridge registers\r\n");
543 max = sizeof(register_values)/sizeof(register_values[0]);
544 for(i = 0; i < max; i += 3) {
/* NOTE(review): "prink_debug" is a typo; this appears to be disabled
 * debug code -- fix before enabling. */
550 prink_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
552 print_spew_hex32(register_values[i]);
554 print_spew_hex32(register_values[i+2]);
/* Rebase the table's node-0 encoded address onto this controller's
 * function-0 device before splitting off the 12-bit offset. */
558 dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
559 where = register_values[i] & 0xfff;
560 reg = pci_read_config32(dev, where);
561 reg &= register_values[i+1];
562 reg |= register_values[i+2];
563 pci_write_config32(dev, where, reg);
/* Alternate variant (presumably the other arm of a hidden #if). */
566 reg = pci_read_config32(register_values[i]);
567 reg &= register_values[i+1];
568 reg |= register_values[i+2];
569 pci_write_config32(register_values[i], reg);
572 print_spew("done.\r\n");
576 static void hw_enable_ecc(const struct mem_controller *ctrl)
579 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
580 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
581 dcl &= ~DCL_DimmEccEn;
582 if (nbcap & NBCAP_ECC) {
583 dcl |= DCL_DimmEccEn;
585 if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
586 dcl &= ~DCL_DimmEccEn;
588 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
592 static int is_dual_channel(const struct mem_controller *ctrl)
595 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
596 return dcl & DCL_128BitEn;
599 static int is_opteron(const struct mem_controller *ctrl)
601 /* Test to see if I am an Opteron.
602 * FIXME Testing dual channel capability is correct for now
603 * but a beter test is probably required.
605 #warning "FIXME implement a better test for opterons"
607 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
608 return !!(nbcap & NBCAP_128Bit);
611 static int is_registered(const struct mem_controller *ctrl)
613 /* Test to see if we are dealing with registered SDRAM.
614 * If we are not registered we are unbuffered.
615 * This function must be called after spd_handle_unbuffered_dimms.
618 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
619 return !(dcl & DCL_UnBufDimm);
/* Fragment of struct dimm_size: the 4-rank build adds extra fields here. */
627 #if K8_4RANK_DIMM_SUPPORT == 1
/*
 * Read the SPD EEPROM at 'device' and compute the log2 size (in bits)
 * of each side of the DIMM.  side1/side2 accumulate log2(rows) +
 * log2(cols) + log2(banks) + log2(width).  Errors bail to the hidden
 * val_err/hw_err labels (fragmentary view).
 */
632 static struct dimm_size spd_get_dimm_size(unsigned device)
634 /* Calculate the log base 2 size of a DIMM in bits */
641 #if K8_4RANK_DIMM_SUPPORT == 1
645 /* Note it might be easier to use byte 31 here, it has the DIMM size as
646 * a multiple of 4MB. The way we do it now we can size both
647 * sides of an asymmetric dimm.
/* SPD byte 3: number of row address bits (side 1 in the low nibble). */
649 value = spd_read_byte(device, 3); /* rows */
650 if (value < 0) goto hw_err;
651 if ((value & 0xf) == 0) goto val_err;
652 sz.side1 += value & 0xf;
653 sz.rows = value & 0xf;
/* SPD byte 4: number of column address bits. */
655 value = spd_read_byte(device, 4); /* columns */
656 if (value < 0) goto hw_err;
657 if ((value & 0xf) == 0) goto val_err;
658 sz.side1 += value & 0xf;
659 sz.col = value & 0xf;
/* SPD byte 17: internal banks per SDRAM device (added as log2). */
661 value = spd_read_byte(device, 17); /* banks */
662 if (value < 0) goto hw_err;
663 if ((value & 0xff) == 0) goto val_err;
664 sz.side1 += log2(value & 0xff);
666 /* Get the module data width and convert it to a power of two */
667 value = spd_read_byte(device, 7); /* (high byte) */
668 if (value < 0) goto hw_err;
672 low = spd_read_byte(device, 6); /* (low byte) */
673 if (low < 0) goto hw_err;
674 value = value | (low & 0xff);
/* Only 64-bit (non-ECC) and 72-bit (ECC) modules are acceptable. */
675 if ((value != 72) && (value != 64)) goto val_err;
676 sz.side1 += log2(value);
/* SPD byte 5: number of module ranks (physical banks). */
679 value = spd_read_byte(device, 5); /* number of physical banks */
680 if (value < 0) goto hw_err;
681 if (value == 1) goto out;
682 if ((value != 2) && (value != 4 )) {
685 #if K8_4RANK_DIMM_SUPPORT == 1
689 /* Start with the symmetrical case */
/* Re-read byte 3: side 2's row count lives in the high nibble. */
692 value = spd_read_byte(device, 3); /* rows */
693 if (value < 0) goto hw_err;
694 if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
695 sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
696 sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */
698 value = spd_read_byte(device, 4); /* columns */
699 if (value < 0) goto hw_err;
700 if ((value & 0xff) == 0) goto val_err;
701 sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
702 sz.side2 += ((value >> 4) & 0x0f); /* Add in columns on side 2 */
/* val_err path: an SPD field held an impossible value. */
707 die("Bad SPD value\r\n");
708 /* If an hw_error occurs report that I have no memory */
/* hw_err path: the 4-rank build presumably also clears sz.rank here. */
714 #if K8_4RANK_DIMM_SUPPORT == 1
/*
 * Program the DRAM CS Base registers for both sides of DIMM 'index'
 * from its computed log2 size, then enable that DIMM's memory clocks.
 * The 4-rank build mirrors the bases into the upper CS pair as well.
 * NOTE(review): fragmentary view -- the zero-size else branches and
 * #endif lines are outside the sampled lines.
 */
722 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
724 uint32_t base0, base1;
/* Asymmetric DIMMs take a hidden side branch here. */
727 if (sz.side1 != sz.side2) {
731 /* For each base register.
732 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
733 * The initial dimm size is in bits.
734 * Set the base enable bit0.
739 /* Make certain side1 of the dimm is at least 32MB */
/* 25 + 3 = log2(32MB in bits); bit 0 is the chip-select enable. */
740 if (sz.side1 >= (25 +3)) {
741 base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
744 /* Make certain side2 of the dimm is at least 32MB */
745 if (sz.side2 >= (25 + 3)) {
746 base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
749 /* Double the size if we are using dual channel memory */
750 if (is_dual_channel(ctrl)) {
/* Shift the size bits up one while preserving the enable bit. */
751 base0 = (base0 << 1) | (base0 & 1);
752 base1 = (base1 << 1) | (base1 & 1);
755 /* Clear the reserved bits */
756 base0 &= ~0x001ffffe;
757 base1 &= ~0x001ffffe;
759 /* Set the appropriate DIMM base address register */
760 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
761 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
762 #if K8_4RANK_DIMM_SUPPORT == 1
/* 4-rank: the second rank pair sits four chip selects higher. */
764 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
765 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
769 /* Enable the memory clocks for this DIMM */
771 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
772 dch |= DCH_MEMCLK_EN0 << index;
773 #if K8_4RANK_DIMM_SUPPORT == 1
775 dch |= DCH_MEMCLK_EN0 << (index + 2);
778 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/*
 * Program this DIMM's nibble in the DRAM Bank Address Mapping register.
 * Pre-D0 CPUs encode the size directly; D0 and later use the row/column
 * lookup table cs_map_aa.  (Fragmentary view: table contents, #endif
 * lines and the else branches are outside the sampled lines.)
 */
782 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
784 static const unsigned cs_map_aa[] = {
785 /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
/* Clear this DIMM's 4-bit mapping field before re-encoding it. */
794 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
795 map &= ~(0xf << (index * 4));
796 #if K8_4RANK_DIMM_SUPPORT == 1
798 map &= ~(0xf << ( (index + 2) * 4));
803 /* Make certain side1 of the dimm is at least 32MB */
804 if (sz.side1 >= (25 +3)) {
805 if(is_cpu_pre_d0()) {
/* Pre-D0 encoding: mapping value is log2(size) above 32MB. */
806 map |= (sz.side1 - (25 + 3)) << (index *4);
807 #if K8_4RANK_DIMM_SUPPORT == 1
809 map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
/* D0+ encoding: index cs_map_aa by (rows - 12, cols - 8). */
814 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
815 #if K8_4RANK_DIMM_SUPPORT == 1
817 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
823 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/*
 * Size every populated DIMM from its SPD data and program its chip
 * select base and bank mapping.  Returns -1 on an SPD read error
 * (the success return path is outside the sampled lines).
 */
827 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
831 for(i = 0; i < DIMM_SOCKETS; i++) {
/* Skip sockets that spd_detect_dimms did not flag as populated. */
833 if (!(dimm_mask & (1 << i))) {
836 sz = spd_get_dimm_size(ctrl->channel0[i]);
838 return -1; /* Report SPD error */
840 set_dimm_size(ctrl, sz, i);
841 set_dimm_map (ctrl, sz, i);
/*
 * Publish this node's DRAM window: write the DRAM Base/Limit register
 * pair (selected by node_id) into function 1 of every node so that all
 * northbridges route [base_k, limit_k) to this controller.
 */
846 static void route_dram_accesses(const struct mem_controller *ctrl,
847 unsigned long base_k, unsigned long limit_k)
849 /* Route the addresses to the controller node */
854 unsigned limit_reg, base_reg;
857 node_id = ctrl->node_id;
/* Each node owns one 8-byte base/limit pair in the 0x40..0x7C window. */
858 index = (node_id << 3);
/* KB -> register format: the registers hold address bits 39-24. */
859 limit = (limit_k << 2);
861 limit |= ( 0 << 8) | (node_id << 0);
863 base = (base_k << 2);
/* Bit 1 = write enable, bit 0 = read enable. */
865 base |= (0 << 8) | (1<<1) | (1<<0);
867 limit_reg = 0x44 + index;
868 base_reg = 0x40 + index;
/* Broadcast the pair to function 1 of devices 0x18..0x1f. */
869 for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
870 pci_write_config32(device, limit_reg, limit);
871 pci_write_config32(device, base_reg, base);
/*
 * Program the TOP_MEM (and, above 4GB, TOP_MEM2) MSRs from the total
 * memory size in KB.  (Fragmentary view: the wrmsr(TOP_MEM, ...) call
 * and the hole adjustment body are outside the sampled lines.)
 */
875 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
877 /* Error if I don't have memory */
882 /* Report the amount of memory. */
883 print_spew("RAM: 0x");
884 print_spew_hex32(tom_k);
885 print_spew(" KB\r\n");
887 /* Now set top of memory */
/* Memory above 4GB (4M KB) is described by TOP_MEM2. */
889 if(tom_k > (4*1024*1024)) {
/* Convert KB to a 40-bit byte address split across lo/hi MSR halves. */
890 msr.lo = (tom_k & 0x003fffff) << 10;
891 msr.hi = (tom_k & 0xffc00000) >> 22;
892 wrmsr(TOP_MEM2, msr);
895 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
896 * so I can see my rom chip and other I/O devices.
/* Clamp below-4GB memory so MMIO space stays visible. */
898 if (tom_k >= 0x003f0000) {
899 #if HW_MEM_HOLE_SIZEK != 0
900 if(hole_startk != 0) {
906 msr.lo = (tom_k & 0x003fffff) << 10;
907 msr.hi = (tom_k & 0xffc00000) >> 22;
/*
 * Interleave the chip selects when they are all the same size and a
 * power-of-two count of them is enabled: low address bits are spread
 * across chip selects via the CS base/mask registers.  Returns the
 * total memory size in KB, or (outside this view) bails out when
 * interleaving is not possible.
 */
911 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
/* Per-cs_mode shift of the lowest interleave bit (pre-D0 encoding). */
914 static const uint8_t csbase_low_shift[] = {
917 /* 128MB */ (14 - 4),
918 /* 256MB */ (15 - 4),
919 /* 512MB */ (15 - 4),
/* D0+ revisions use a different cs_mode -> shift table. */
924 static const uint8_t csbase_low_d0_shift[] = {
927 /* 128MB */ (14 - 4),
928 /* 128MB */ (15 - 4),
929 /* 256MB */ (15 - 4),
930 /* 512MB */ (15 - 4),
931 /* 256MB */ (16 - 4),
932 /* 512MB */ (16 - 4),
938 /* cs_base_high is not changed */
941 int chip_selects, index;
943 unsigned common_size;
944 unsigned common_cs_mode;
945 uint32_t csbase, csmask;
947 /* See if all of the memory chip selects are the same size
948 * and if so count them.
953 for(index = 0; index < 8; index++) {
958 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
/* First enabled chip select establishes the common size. */
966 if (common_size == 0) {
969 /* The size differed fail */
970 if (common_size != size) {
/* Each pair of chip selects shares one Bank Address Mapping nibble. */
974 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
975 cs_mode =( value >> ((index>>1)*4)) & 0xf;
976 if(cs_mode == 0 ) continue;
977 if(common_cs_mode == 0) {
978 common_cs_mode = cs_mode;
980 /* The cs_mode differed fail */
981 if(common_cs_mode != cs_mode) {
986 /* Chip selects can only be interleaved when there is
987 * more than one and there is a power of two of them.
989 bits = log2(chip_selects);
990 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
994 /* Find the bits of csbase that we need to interleave on */
996 csbase_inc = 1 << csbase_low_shift[common_cs_mode];
997 if(is_dual_channel(ctrl)) {
998 /* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
999 if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
1000 // print_debug("8 4GB chip selects cannot be interleaved\r\n");
/* D0+ path: same idea with the D0 shift table and its own limit. */
1007 csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
1008 if(is_dual_channel(ctrl)) {
1009 if( (bits==3) && (common_cs_mode > 8)) {
1010 // print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
1017 /* Compute the initial values for csbase and csmask.
1018 * In csbase just set the enable bit and the base to zero.
1019 * In csmask set the mask bits for the size and page level interleave.
1022 csmask = (((common_size << bits) - 1) << 21);
1023 csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
/* Rewrite every enabled chip select, stepping the base by the
 * interleave increment so each one claims a different low-bit slice. */
1024 for(index = 0; index < 8; index++) {
1027 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1028 /* Is it enabled? */
1032 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1033 pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
1034 csbase += csbase_inc;
1037 print_spew("Interleaved\r\n");
1039 /* Return the memory size in K */
/* common_size is in 32MB units (register bits 31-21); << 15 converts
 * to KB, plus 'bits' for the interleaved chip-select count. */
1040 return common_size << (15 + bits);
/*
 * Non-interleaved fallback: repeatedly pick the largest not-yet-placed
 * enabled chip select and stack it at the current top of memory.
 * Returns the total memory size in KB.
 * NOTE(review): "canidate" is a misspelling of "candidate" but it is a
 * code identifier here, so it is left untouched.
 */
1043 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1047 /* Remember which registers we have used in the high 8 bits of tom */
1050 /* Find the largest remaining candidate */
1051 unsigned index, canidate;
1052 uint32_t csbase, csmask;
1056 for(index = 0; index < 8; index++) {
1058 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1060 /* Is it enabled? */
1065 /* Is it greater? */
1066 if (value <= csbase) {
1070 /* Has it already been selected */
/* Placement bookkeeping lives in tom's bits 31-24, one per register. */
1071 if (tom & (1 << (index + 24))) {
1074 /* I have a new candidate */
1078 /* See if I have found a new candidate */
1083 /* Remember the dimm size */
/* Size in 32MB units, from base-register bits 31-21. */
1084 size = csbase >> 21;
1086 /* Remember I have used this register */
1087 tom |= (1 << (canidate + 24));
1089 /* Recompute the cs base register value */
1090 csbase = (tom << 21) | 1;
1092 /* Increment the top of memory */
1095 /* Compute the memory mask */
1096 csmask = ((size -1) << 21);
1097 csmask |= 0xfe00; /* For now don't optimize */
1099 /* Write the new base register */
1100 pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1101 /* Write the new mask register */
1102 pci_write_config32(ctrl->f2, DRAM_CSMASK + (canidate << 2), csmask);
1105 /* Return the memory size in K */
/* Strip the bookkeeping bits; tom counts 32MB units, << 15 -> KB. */
1106 return (tom & ~0xff000000) << 15;
1109 unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1113 /* Find the last memory address used */
1115 for(node_id = 0; node_id < max_node_id; node_id++) {
1116 uint32_t limit, base;
1118 index = node_id << 3;
1119 base = pci_read_config32(ctrl->f1, 0x40 + index);
1120 /* Only look at the limit if the base is enabled */
1121 if ((base & 3) == 3) {
1122 limit = pci_read_config32(ctrl->f1, 0x44 + index);
1123 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
/*
 * Lay out this node's DIMMs: interleave the chip selects when the CMOS
 * option allows it (falling back to simple stacking), then route the
 * resulting window to this node and set the top-of-memory MSRs.
 */
1129 static void order_dimms(const struct mem_controller *ctrl)
1131 unsigned long tom_k, base_k;
/* CMOS option defaults to interleaving enabled. */
1133 if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1134 tom_k = interleave_chip_selects(ctrl);
1136 print_debug("Interleaving disabled\r\n");
1140 tom_k = order_chip_selects(ctrl);
1142 /* Compute the memory base address */
/* This node's memory starts where the previous nodes' memory ends. */
1143 base_k = memory_end_k(ctrl, ctrl->node_id);
1145 route_dram_accesses(ctrl, base_k, tom_k);
1146 set_top_mem(tom_k, 0);
1149 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1151 print_debug("disabling dimm");
1152 print_debug_hex8(index);
1153 print_debug("\r\n");
1154 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1155 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1156 dimm_mask &= ~(1 << index);
/*
 * Classify the populated DIMMs as registered or unbuffered from SPD
 * byte 21, refuse unsupported mixes, and program DCL_UnBufDimm to
 * match.  Returns the (possibly unchanged) dimm mask outside this view.
 */
1160 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, long dimm_mask)
1168 for(i = 0; (i < DIMM_SOCKETS); i++) {
1170 if (!(dimm_mask & (1 << i))) {
/* SPD byte 21: SDRAM module attributes. */
1173 value = spd_read_byte(ctrl->channel0[i], 21);
1177 /* Registered dimm ? */
1178 if (value & (1 << 1)) {
1181 /* Otherwise it must be an unbuffered dimm */
/* A module set must be uniformly registered or uniformly unbuffered. */
1186 if (unbuffered && registered) {
1187 die("Mixed buffered and registered dimms not supported");
1190 //By yhlu for debug Athlon64 939 can do dual channel, but it use unbuffer DIMM
1191 if (unbuffered && is_opteron(ctrl)) {
1192 die("Unbuffered Dimms not supported on Opteron");
/* Reflect the result in DRAM Config Low's UnBufDimm bit. */
1196 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1197 dcl &= ~DCL_UnBufDimm;
1199 dcl |= DCL_UnBufDimm;
1201 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1203 if (is_registered(ctrl)) {
1204 print_debug("Registered\r\n");
1206 print_debug("Unbuffered\r\n");
/*
 * Probe SPD byte 2 (memory type) of every socket on both channels and
 * build a presence bitmap: bit i for channel 0, bit i + DIMM_SOCKETS
 * for channel 1.  (The type check against the read byte is outside the
 * sampled lines.)
 */
1212 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1217 for(i = 0; i < DIMM_SOCKETS; i++) {
1220 device = ctrl->channel0[i];
1222 byte = spd_read_byte(ctrl->channel0[i], 2); /* Type */
1224 dimm_mask |= (1 << i);
1227 device = ctrl->channel1[i];
1229 byte = spd_read_byte(ctrl->channel1[i], 2);
1231 dimm_mask |= (1 << (i + DIMM_SOCKETS));
/*
 * Enable 128-bit (dual channel) mode when every populated socket holds
 * an identical DIMM pair: the DIMMs must be present in matched pairs,
 * the northbridge must be 128-bit capable, and the listed SPD bytes
 * must compare equal between the two channels.  Otherwise fall through
 * to the hidden single_channel label.
 */
1238 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
1242 /* SPD addresses to verify are identical */
1243 static const uint8_t addresses[] = {
1244 2, /* Type should be DDR SDRAM */
1245 3, /* *Row addresses */
1246 4, /* *Column addresses */
1247 5, /* *Physical Banks */
1248 6, /* *Module Data Width low */
1249 7, /* *Module Data Width high */
1250 9, /* *Cycle time at highest CAS Latency CL=X */
1251 11, /* *SDRAM Type */
1252 13, /* *SDRAM Width */
1253 17, /* *Logical Banks */
1254 18, /* *Supported CAS Latencies */
1255 21, /* *SDRAM Module Attributes */
1256 23, /* *Cycle time at CAS Latency (CLX - 0.5) */
1257 26, /* *Cycle time at CAS Latency (CLX - 1.0) */
1258 27, /* *tRP Row precharge time */
1259 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1260 29, /* *tRCD RAS to CAS */
1261 30, /* *tRAS Activate to Precharge */
1262 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1263 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1265 /* If the dimms are not in pairs do not do dual channels */
1266 if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1267 ((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1268 goto single_channel;
1270 /* If the cpu is not capable of doing dual channels don't do dual channels */
1271 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1272 if (!(nbcap & NBCAP_128Bit)) {
1273 goto single_channel;
/* Compare the critical SPD bytes of each populated pair. */
1275 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1276 unsigned device0, device1;
1279 /* If I don't have a dimm skip this one */
1280 if (!(dimm_mask & (1 << i))) {
1283 device0 = ctrl->channel0[i];
1284 device1 = ctrl->channel1[i];
1285 for(j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
1287 addr = addresses[j];
1288 value0 = spd_read_byte(device0, addr);
1292 value1 = spd_read_byte(device1, addr);
1296 if (value0 != value1) {
1297 goto single_channel;
1301 print_spew("Enabling dual channel memory\r\n");
/* 128-bit mode excludes 32-byte granularity. */
1303 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1304 dcl &= ~DCL_32ByteEn;
1305 dcl |= DCL_128BitEn;
1306 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Drop the channel-1 presence bits; the pairs act as single DIMMs now. */
1309 dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
/* Fields of struct mem_param (the struct header and its other members
 * are outside the sampled lines). */
1315 uint8_t divisor; /* In 1/2 ns increments */
/* Pre-shifted MEMCLK field for DRAM Config High, and the Tref encodings
 * for 4K- and 8K-row devices for DRAM Timing High. */
1318 uint32_t dch_memclk;
1319 uint16_t dch_tref4k, dch_tref8k;
/*
 * Map a minimum SPD cycle time onto the slowest table entry that still
 * satisfies it, scanning the speed[] table (fastest entries last) and
 * dying when even 200MHz is too slow for the request.
 */
1324 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1326 static const struct mem_param speed[] = {
1328 .name = "100Mhz\r\n",
/* divisor is in half-nanosecond units: 10ns -> 20. */
1330 .divisor = (10 <<1),
1333 .dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1334 .dch_tref4k = DTH_TREF_100MHZ_4K,
1335 .dch_tref8k = DTH_TREF_100MHZ_8K,
1339 .name = "133Mhz\r\n",
/* 7.5ns -> 15 half-ns. */
1341 .divisor = (7<<1)+1,
1344 .dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1345 .dch_tref4k = DTH_TREF_133MHZ_4K,
1346 .dch_tref8k = DTH_TREF_133MHZ_8K,
1350 .name = "166Mhz\r\n",
1355 .dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1356 .dch_tref4k = DTH_TREF_166MHZ_4K,
1357 .dch_tref8k = DTH_TREF_166MHZ_8K,
1361 .name = "200Mhz\r\n",
1366 .dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1367 .dch_tref4k = DTH_TREF_200MHZ_4K,
1368 .dch_tref8k = DTH_TREF_200MHZ_8K,
1375 const struct mem_param *param;
/* Advance while the next (faster) entry still meets min_cycle_time;
 * a zero cycle_time terminates the table. */
1376 for(param = &speed[0]; param->cycle_time ; param++) {
1377 if (min_cycle_time > (param+1)->cycle_time) {
1381 if (!param->cycle_time) {
/* NOTE(review): message typo -- should read "too low"; left unchanged
 * here because it is a runtime string. */
1382 die("min_cycle_time to low");
1384 print_spew(param->name);
1385 #ifdef DRAM_MIN_CYCLE_TIME
1386 print_debug(param->name);
/* Result bundle returned by spd_set_memclk: the chosen timing parameter
 * set (further members, presumably the updated dimm mask, lie outside
 * the sampled lines -- confirm against the full file). */
1391 struct spd_set_memclk_result {
1392 const struct mem_param *param;
1395 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1397 /* Compute the minimum cycle time for these dimms */
1398 struct spd_set_memclk_result result;
1399 unsigned min_cycle_time, min_latency, bios_cycle_time;
1403 static const uint8_t latency_indicies[] = { 26, 23, 9 };
1404 static const unsigned char min_cycle_times[] = {
1405 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1406 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1407 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1408 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
1412 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1413 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1414 bios_cycle_time = min_cycle_times[
1415 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1416 if (bios_cycle_time > min_cycle_time) {
1417 min_cycle_time = bios_cycle_time;
1421 /* Compute the least latency with the fastest clock supported
1422 * by both the memory controller and the dimms.
1424 for(i = 0; i < DIMM_SOCKETS; i++) {
1425 int new_cycle_time, new_latency;
1430 if (!(dimm_mask & (1 << i))) {
1434 /* First find the supported CAS latencies
1435 * Byte 18 for DDR SDRAM is interpreted:
1436 * bit 0 == CAS Latency = 1.0
1437 * bit 1 == CAS Latency = 1.5
1438 * bit 2 == CAS Latency = 2.0
1439 * bit 3 == CAS Latency = 2.5
1440 * bit 4 == CAS Latency = 3.0
1441 * bit 5 == CAS Latency = 3.5
1445 new_cycle_time = 0xa0;
1448 latencies = spd_read_byte(ctrl->channel0[i], 18);
1449 if (latencies <= 0) continue;
1451 /* Compute the lowest cas latency supported */
1452 latency = log2(latencies) -2;
1454 /* Loop through and find a fast clock with a low latency */
1455 for(index = 0; index < 3; index++, latency++) {
1457 if ((latency < 2) || (latency > 4) ||
1458 (!(latencies & (1 << latency)))) {
1461 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1466 /* Only increase the latency if we decreas the clock */
1467 if ((value >= min_cycle_time) && (value < new_cycle_time)) {
1468 new_cycle_time = value;
1469 new_latency = latency;
1472 if (new_latency > 4){
1475 /* Does min_latency need to be increased? */
1476 if (new_cycle_time > min_cycle_time) {
1477 min_cycle_time = new_cycle_time;
1479 /* Does min_cycle_time need to be increased? */
1480 if (new_latency > min_latency) {
1481 min_latency = new_latency;
1484 /* Make a second pass through the dimms and disable
1485 * any that cannot support the selected memclk and cas latency.
1488 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1493 if (!(dimm_mask & (1 << i))) {
1496 latencies = spd_read_byte(ctrl->channel0[i], 18);
1497 if (latencies < 0) goto hw_error;
1498 if (latencies == 0) {
1502 /* Compute the lowest cas latency supported */
1503 latency = log2(latencies) -2;
1505 /* Walk through searching for the selected latency */
1506 for(index = 0; index < 3; index++, latency++) {
1507 if (!(latencies & (1 << latency))) {
1510 if (latency == min_latency)
1513 /* If I can't find the latency or my index is bad error */
1514 if ((latency != min_latency) || (index >= 3)) {
1518 /* Read the min_cycle_time for this latency */
1519 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1520 if (value < 0) goto hw_error;
1522 /* All is good if the selected clock speed
1523 * is what I need or slower.
1525 if (value <= min_cycle_time) {
1528 /* Otherwise I have an error, disable the dimm */
1530 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1533 // Reduce the memory speed when fully loaded with 4-rank DIMMs
1534 #if K8_4RANK_DIMM_SUPPORT
1535 if(dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1537 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1539 if (!(dimm_mask & (1 << i))) {
1542 val = spd_read_byte(ctrl->channel0[i], 5);
1549 if(min_cycle_time <= 0x50 ) {
1550 min_cycle_time = 0x60;
1557 /* Now that I know the minimum cycle time lookup the memory parameters */
1558 result.param = get_mem_param(min_cycle_time);
1560 /* Update DRAM Config High with our selected memory speed */
1561 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1562 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1564 /* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
1565 if(!is_cpu_pre_e0()) {
1566 if(min_cycle_time==0x50) {
1572 value |= result.param->dch_memclk;
1573 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1575 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1576 /* Update DRAM Timing Low with our selected cas latency */
1577 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1578 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1579 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1580 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1582 result.dimm_mask = dimm_mask;
1585 result.param = (const struct mem_param *)0;
1586 result.dimm_mask = -1;
1591 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1593 unsigned clocks, old_clocks;
1596 value = spd_read_byte(ctrl->channel0[i], 41);
1597 if (value < 0) return -1;
1598 if ((value == 0) || (value == 0xff)) {
1601 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1602 if (clocks < DTL_TRC_MIN) {
1603 clocks = DTL_TRC_MIN;
1605 if (clocks > DTL_TRC_MAX) {
1609 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1610 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1611 if (old_clocks > clocks) {
1612 clocks = old_clocks;
1614 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1615 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1616 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1620 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1622 unsigned clocks, old_clocks;
1625 value = spd_read_byte(ctrl->channel0[i], 42);
1626 if (value < 0) return -1;
1627 if ((value == 0) || (value == 0xff)) {
1628 value = param->tRFC;
1630 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1631 if (clocks < DTL_TRFC_MIN) {
1632 clocks = DTL_TRFC_MIN;
1634 if (clocks > DTL_TRFC_MAX) {
1637 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1638 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
1639 if (old_clocks > clocks) {
1640 clocks = old_clocks;
1642 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1643 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1644 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1649 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1651 unsigned clocks, old_clocks;
1654 value = spd_read_byte(ctrl->channel0[i], 29);
1655 if (value < 0) return -1;
1656 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1657 if (clocks < DTL_TRCD_MIN) {
1658 clocks = DTL_TRCD_MIN;
1660 if (clocks > DTL_TRCD_MAX) {
1663 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1664 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
1665 if (old_clocks > clocks) {
1666 clocks = old_clocks;
1668 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1669 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1670 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1674 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1676 unsigned clocks, old_clocks;
1679 value = spd_read_byte(ctrl->channel0[i], 28);
1680 if (value < 0) return -1;
1681 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1682 if (clocks < DTL_TRRD_MIN) {
1683 clocks = DTL_TRRD_MIN;
1685 if (clocks > DTL_TRRD_MAX) {
1688 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1689 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
1690 if (old_clocks > clocks) {
1691 clocks = old_clocks;
1693 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1694 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1695 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1699 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1701 unsigned clocks, old_clocks;
1704 value = spd_read_byte(ctrl->channel0[i], 30);
1705 if (value < 0) return -1;
1706 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1707 if (clocks < DTL_TRAS_MIN) {
1708 clocks = DTL_TRAS_MIN;
1710 if (clocks > DTL_TRAS_MAX) {
1713 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1714 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1715 if (old_clocks > clocks) {
1716 clocks = old_clocks;
1718 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1719 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1720 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1724 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1726 unsigned clocks, old_clocks;
1729 value = spd_read_byte(ctrl->channel0[i], 27);
1730 if (value < 0) return -1;
1731 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1732 if (clocks < DTL_TRP_MIN) {
1733 clocks = DTL_TRP_MIN;
1735 if (clocks > DTL_TRP_MAX) {
1738 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1739 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
1740 if (old_clocks > clocks) {
1741 clocks = old_clocks;
1743 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1744 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1745 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1749 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1752 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1753 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1754 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1755 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1759 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1762 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1763 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1764 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1765 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1768 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1772 unsigned tref, old_tref;
1773 value = spd_read_byte(ctrl->channel0[i], 3);
1774 if (value < 0) return -1;
1777 tref = param->dch_tref8k;
1779 tref = param->dch_tref4k;
1782 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1783 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
1784 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1785 tref = param->dch_tref4k;
1787 tref = param->dch_tref8k;
1789 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1790 dth |= (tref << DTH_TREF_SHIFT);
1791 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1796 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1800 #if K8_4RANK_DIMM_SUPPORT == 1
1804 value = spd_read_byte(ctrl->channel0[i], 13);
1809 #if K8_4RANK_DIMM_SUPPORT == 1
1810 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
1816 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1817 #if K8_4RANK_DIMM_SUPPORT == 1
1819 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1822 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1827 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1831 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1835 value = spd_read_byte(ctrl->channel0[i], 11);
1840 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1841 dcl &= ~DCL_DimmEccEn;
1842 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1847 static int count_dimms(const struct mem_controller *ctrl)
1852 for(index = 0; index < 8; index += 2) {
1854 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
1862 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1866 clocks = 1; /* AMD says hard code this */
1867 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1868 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1869 dth |= ((clocks - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1870 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1873 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
1881 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1882 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1883 divisor = param->divisor;
1885 if (is_opteron(ctrl)) {
1886 if (latency == DTL_CL_2) {
1887 if (divisor == ((6 << 0) + 0)) {
1891 else if (divisor > ((6 << 0)+0)) {
1892 /* 100Mhz && 133Mhz */
1896 else if (latency == DTL_CL_2_5) {
1899 else if (latency == DTL_CL_3) {
1900 if (divisor == ((6 << 0)+0)) {
1904 else if (divisor > ((6 << 0)+0)) {
1905 /* 100Mhz && 133Mhz */
1910 else /* Athlon64 */ {
1911 if (is_registered(ctrl)) {
1912 if (latency == DTL_CL_2) {
1915 else if (latency == DTL_CL_2_5) {
1918 else if (latency == DTL_CL_3) {
1922 else /* Unbuffered */{
1923 if (latency == DTL_CL_2) {
1926 else if (latency == DTL_CL_2_5) {
1929 else if (latency == DTL_CL_3) {
1934 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1935 die("Unknown Trwt\r\n");
1938 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1939 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1940 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1941 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1945 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1947 /* Memory Clocks after CAS# */
1950 if (is_registered(ctrl)) {
1955 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1956 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1957 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1958 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1962 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1966 unsigned rdpreamble;
1967 divisor = param->divisor;
1968 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1969 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
1971 if (is_registered(ctrl)) {
1972 if (divisor == ((10 << 1)+0)) {
1974 rdpreamble = ((9 << 1)+ 0);
1976 else if (divisor == ((7 << 1)+1)) {
1978 rdpreamble = ((8 << 1)+0);
1980 else if (divisor == ((6 << 1)+0)) {
1982 rdpreamble = ((7 << 1)+1);
1984 else if (divisor == ((5 << 1)+0)) {
1986 rdpreamble = ((7 << 1)+0);
1993 for(i = 0; i < 4; i++) {
1994 if (ctrl->channel0[i]) {
1998 if (divisor == ((10 << 1)+0)) {
2002 rdpreamble = ((9 << 1)+0);
2005 rdpreamble = ((14 << 1)+0);
2008 else if (divisor == ((7 << 1)+1)) {
2012 rdpreamble = ((7 << 1)+0);
2015 rdpreamble = ((11 << 1)+0);
2018 else if (divisor == ((6 << 1)+0)) {
2022 rdpreamble = ((7 << 1)+0);
2025 rdpreamble = ((9 << 1)+0);
2028 else if (divisor == ((5 << 1)+0)) {
2032 rdpreamble = ((5 << 1)+0);
2035 rdpreamble = ((7 << 1)+0);
2039 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
2040 die("Unknown rdpreamble");
2042 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
2043 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2046 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2052 dimms = count_dimms(ctrl);
2054 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2055 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
2057 if (is_registered(ctrl)) {
2069 die("Too many unbuffered dimms");
2071 else if (dimms == 3) {
2080 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2081 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2084 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2087 /* AMD says to Hardcode this */
2088 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2089 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2090 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2091 dch |= DCH_DYN_IDLE_CTR_EN;
2092 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2095 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2099 init_Tref(ctrl, param);
2100 for(i = 0; i < DIMM_SOCKETS; i++) {
2102 if (!(dimm_mask & (1 << i))) {
2105 /* DRAM Timing Low Register */
2106 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2107 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2108 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2109 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2110 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2111 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2113 /* DRAM Timing High Register */
2114 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2117 /* DRAM Config Low */
2118 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2119 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
2125 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2127 /* DRAM Timing Low Register */
2128 set_Twr(ctrl, param);
2130 /* DRAM Timing High Register */
2131 set_Twtr(ctrl, param);
2132 set_Trwt(ctrl, param);
2133 set_Twcl(ctrl, param);
2135 /* DRAM Config High */
2136 set_read_preamble(ctrl, param);
2137 set_max_async_latency(ctrl, param);
2138 set_idle_cycle_limit(ctrl, param);
2142 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2144 struct spd_set_memclk_result result;
2145 const struct mem_param *param;
2148 if (!controller_present(ctrl)) {
2149 // print_debug("No memory controller present\r\n");
2153 hw_enable_ecc(ctrl);
2154 activate_spd_rom(ctrl);
2155 dimm_mask = spd_detect_dimms(ctrl);
2156 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2157 print_debug("No memory for this cpu\r\n");
2160 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2163 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2166 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
2169 result = spd_set_memclk(ctrl, dimm_mask);
2170 param = result.param;
2171 dimm_mask = result.dimm_mask;
2174 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2180 /* Unrecoverable error reading SPD data */
2181 print_err("SPD error - reset\r\n");
2186 #if HW_MEM_HOLE_SIZEK != 0
2187 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2190 uint32_t carry_over;
2192 uint32_t base, limit;
2197 carry_over = (4*1024*1024) - hole_startk;
2199 for(ii=controllers - 1;ii>i;ii--) {
2200 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2201 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2204 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
2205 for(j = 0; j < controllers; j++) {
2206 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2207 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
2210 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2211 for(j = 0; j < controllers; j++) {
2212 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2215 base = pci_read_config32(dev, 0x40 + (i << 3));
2216 basek = (base & 0xffff0000) >> 2;
2217 if(basek == hole_startk) {
2218 //don't need set memhole here, because hole off set will be 0, overflow
2219 //so need to change base reg instead, new basek will be 4*1024*1024
2221 base |= (4*1024*1024)<<2;
2222 for(j = 0; j < controllers; j++) {
2223 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
2227 hoist = /* hole start address */
2228 ((hole_startk << 10) & 0xff000000) +
2229 /* hole address to memory controller address */
2230 (((basek + carry_over) >> 6) & 0x0000ff00) +
2233 pci_write_config32(dev, 0xf0, hoist);
2239 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2242 uint32_t hole_startk;
2245 hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
2247 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
2248 //We need to double check if the hole_startk is valid, if it is equal to basek, we need to decrease it some
2250 for(i=0; i<controllers; i++) {
2253 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2254 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2257 base_k = (base & 0xffff0000) >> 2;
2258 if(base_k == hole_startk) {
2259 hole_startk -= (base_k - basek_pri)>>1; // decrease mem hole startk to make sure it is on middle of privous node
2260 break; //only one hole
2266 //find node index that need do set hole
2267 for(i=0; i<controllers; i++) {
2268 uint32_t base, limit;
2269 unsigned base_k, limit_k;
2270 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2271 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2274 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2275 base_k = (base & 0xffff0000) >> 2;
2276 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2277 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2279 hoist_memory(controllers, ctrl, hole_startk, i);
2280 end_k = memory_end_k(ctrl, controllers);
2281 set_top_mem(end_k, hole_startk);
2282 break; //only one hole
2290 #define TIMEOUT_LOOPS 300000
2291 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2295 /* Error if I don't have memory */
2296 if (memory_end_k(ctrl, controllers) == 0) {
2297 die("No memory\r\n");
2300 /* Before enabling memory start the memory clocks */
2301 for(i = 0; i < controllers; i++) {
2303 if (!controller_present(ctrl + i))
2305 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2306 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2307 dch |= DCH_MEMCLK_VALID;
2308 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2311 /* Disable dram receivers */
2313 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2314 dcl |= DCL_DisInRcvrs;
2315 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2319 /* And if necessary toggle the the reset on the dimms by hand */
2320 memreset(controllers, ctrl);
2322 for(i = 0; i < controllers; i++) {
2324 if (!controller_present(ctrl + i))
2326 /* Skip everything if I don't have any memory on this controller */
2327 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2328 if (!(dch & DCH_MEMCLK_VALID)) {
2332 /* Toggle DisDqsHys to get it working */
2333 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2334 if (dcl & DCL_DimmEccEn) {
2336 print_spew("ECC enabled\r\n");
2337 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
2339 if (dcl & DCL_128BitEn) {
2340 mnc |= MNC_CHIPKILL_EN;
2342 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2344 dcl |= DCL_DisDqsHys;
2345 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2346 dcl &= ~DCL_DisDqsHys;
2347 dcl &= ~DCL_DLL_Disable;
2350 dcl |= DCL_DramInit;
2351 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2354 for(i = 0; i < controllers; i++) {
2356 if (!controller_present(ctrl + i))
2358 /* Skip everything if I don't have any memory on this controller */
2359 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2360 if (!(dch & DCH_MEMCLK_VALID)) {
2364 print_debug("Initializing memory: ");
2368 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2370 if ((loops & 1023) == 0) {
2373 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2374 if (loops >= TIMEOUT_LOOPS) {
2375 print_debug(" failed\r\n");
2379 if (!is_cpu_pre_c0()) {
2380 /* Wait until it is safe to touch memory */
2381 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2382 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2384 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2385 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2388 print_debug(" done\r\n");
2391 #if HW_MEM_HOLE_SIZEK != 0
2392 // init hw mem hole here
2393 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2394 if(!is_cpu_pre_e0())
2395 set_hw_mem_hole(controllers, ctrl);
2398 //FIXME add enable node interleaving here -- yhlu
2400 1. check how many nodes we have , if not all has ram installed get out
2401 2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
2402 3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
2403 4. if all ready enable node_interleaving in f1 0x40..... of every node
2404 5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
2407 #if USE_DCACHE_RAM == 0
2408 /* Make certain the first 1M of memory is intialized */
2409 print_debug("Clearing initial memory region: ");
2411 /* Use write combine caching while we setup the first 1M */
2412 cache_lbmem(MTRR_TYPE_WRCOMB);
2414 /* clear memory 1meg */
2415 clear_memory((void *)0, CONFIG_LB_MEM_TOPK << 10);
2417 /* The first 1M is now setup, use it */
2418 cache_lbmem(MTRR_TYPE_WRBACK);
2420 print_debug(" done\r\n");
2424 static int mem_inited(int controllers, const struct mem_controller *ctrl)
2429 unsigned mask_inited = 0;
2431 for(i = 0; i < controllers; i++) {
2433 if (!controller_present(ctrl + i))
2437 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2439 if (!is_cpu_pre_c0()) { // B3
2441 if( (dcl & DCL_MemClrStatus) && (dcl & DCL_DramEnable) ) {
2442 mask_inited |= (1<<i);
2447 if(mask == mask_inited) return 1;
2452 #if USE_DCACHE_RAM == 1
2453 static void set_sysinfo_in_ram(unsigned val)
2457 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
2461 struct mem_controller *ctrl;
2462 for(i=0;i<controllers; i++) {
2465 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2466 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2467 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2468 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2470 if(spd_addr == (void *)0) continue;
2472 for(j=0;j<DIMM_SOCKETS;j++) {
2473 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2474 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];