1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
7 #include <cpu/x86/mem.h>
8 #include <cpu/x86/cache.h>
9 #include <cpu/x86/mtrr.h>
14 #if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0
15 # error "CONFIG_RAMTOP must be a power of 2"
18 #ifndef QRANK_DIMM_SUPPORT
19 #define QRANK_DIMM_SUPPORT 0
22 #if defined (__GNUC__)
23 static void hard_reset(void);
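/* Each register_values[] entry below is a triplet: the PCI config address
 * of a register, an AND mask of the bits to preserve, and an OR value of
 * the bits to set.  setup_resource_map() (and sdram_set_registers()) walk
 * the table three entries at a time and apply a read-modify-write to each
 * register.
 */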
26 static void setup_resource_map(const unsigned int *register_values, int max)
29 // printk_debug("setting up resource map....");
30 for (i = 0; i < max; i += 3) {
34 dev = register_values[i] & ~0xfff;
35 where = register_values[i] & 0xfff;
36 reg = pci_read_config32(dev, where);
37 reg &= register_values[i+1];
38 reg |= register_values[i+2];
39 pci_write_config32(dev, where, reg);
41 // printk_debug("done.\n");
44 static int controller_present(const struct mem_controller *ctrl)
46 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
49 #if RAMINIT_SYSINFO==1
50 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
52 static void sdram_set_registers(const struct mem_controller *ctrl)
55 static const unsigned int register_values[] = {
57 /* Careful set limit registers before base registers which
58 contain the enables */
59 /* DRAM Limit i Registers
68 * [ 2: 0] Destination Node ID
78 * [10: 8] Interleave select
79 * specifies the values of A[14:12] to use with interleave enable.
81 * [31:16] DRAM Limit Address i Bits 39-24
82 * This field defines the upper address bits of a 40 bit address
83 * that define the end of the DRAM region.
85 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
86 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
87 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
88 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
89 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
90 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
91 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
92 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
93 /* DRAM Base i Registers
102 * [ 0: 0] Read Enable
105 * [ 1: 1] Write Enable
106 * 0 = Writes Disabled
109 * [10: 8] Interleave Enable
110 * 000 = No interleave
111 * 001 = Interleave on A[12] (2 nodes)
113 * 011 = Interleave on A[12] and A[14] (4 nodes)
117 * 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
119 * [31:16] DRAM Base Address i Bits 39-24
120 * This field defines the upper address bits of a 40-bit address
121 * that define the start of the DRAM region.
123 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
124 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
125 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
126 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
127 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
128 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
129 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
130 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
132 /* DRAM CS Base Address i Registers
141 * [ 0: 0] Chip-Select Bank Enable
145 * [15: 9] Base Address (19-13)
146 * An optimization used when all DIMMs are the same size...
148 * [31:21] Base Address (35-25)
149 * This field defines the top 11 address bits of a 40-bit
150 * address that define the memory address space. These
151 * bits decode 32-MByte blocks of memory.
153 PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
154 PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
155 PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
156 PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
157 PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
158 PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
159 PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
160 PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
161 /* DRAM CS Mask Address i Registers
170 * Select bits to exclude from comparison with the DRAM Base address register.
172 * [15: 9] Address Mask (19-13)
173 * Address to be excluded from the optimized case
175 * [29:21] Address Mask (33-25)
176 * The bits with an address mask of 1 are excluded from address comparison
180 PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
181 PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
182 PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
183 PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
184 PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
185 PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
186 PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
187 PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
188 /* DRAM Bank Address Mapping Register
190 * Specify the memory module size
195 * 000 = 32Mbyte (Rows = 12 & Col = 8)
196 * 001 = 64Mbyte (Rows = 12 & Col = 9)
197 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
198 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
199 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
200 * 101 = 1Gbyte (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
201 * 110 = 2Gbyte (Rows = 14 & Col = 12)
208 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
209 /* DRAM Timing Low Register
211 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
221 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
222 * 0000 = 7 bus clocks
223 * 0001 = 8 bus clocks
225 * 1110 = 21 bus clocks
226 * 1111 = 22 bus clocks
227 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
228 * 0000 = 9 bus clocks
229 * 0001 = 10 bus clocks
231 * 1110 = 23 bus clocks
232 * 1111 = 24 bus clocks
233 * [14:12] Trcd (Ras#-active to Cas#-read/write Delay)
243 * [18:16] Trrd (Ras# to Ras# Delay)
253 * [23:20] Tras (Minimum Ras# Active Time)
254 * 0000 to 0100 = reserved
255 * 0101 = 5 bus clocks
257 * 1111 = 15 bus clocks
258 * [26:24] Trp (Row Precharge Time)
268 * [28:28] Twr (Write Recovery Time)
273 PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
274 /* DRAM Timing High Register
276 * [ 0: 0] Twtr (Write to Read Delay)
280 * [ 6: 4] Trwt (Read to Write Delay)
290 * [12: 8] Tref (Refresh Rate)
291 * 00000 = 100MHz 4K rows
292 * 00001 = 133MHz 4K rows
293 * 00010 = 166MHz 4K rows
294 * 00011 = 200MHz 4K rows
295 * 01000 = 100MHz 8K/16K rows
296 * 01001 = 133MHz 8K/16K rows
297 * 01010 = 166MHz 8K/16K rows
298 * 01011 = 200MHz 8K/16K rows
300 * [22:20] Twcl (Write CAS Latency)
301 * 000 = 1 Mem clock after CAS# (Unbuffered Dimms)
302 * 001 = 2 Mem clocks after CAS# (Registered Dimms)
305 PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
306 /* DRAM Config Low Register
308 * [ 0: 0] DLL Disable
317 * [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
318 * 0 = Enable DQS input filter
319 * 1 = Disable DQS input filtering
322 * 0 = Initialization done or not yet started.
323 * 1 = Initiate DRAM initialization sequence
324 * [ 9: 9] SO-Dimm Enable
326 * 1 = SO-Dimms present
328 * 0 = DRAM not enabled
329 * 1 = DRAM initialized and enabled
330 * [11:11] Memory Clear Status
331 * 0 = Memory Clear function has not completed
332 * 1 = Memory Clear function has completed
333 * [12:12] Exit Self-Refresh
334 * 0 = Exit from self-refresh done or not yet started
335 * 1 = DRAM exiting from self refresh
336 * [13:13] Self-Refresh Status
337 * 0 = Normal Operation
338 * 1 = Self-refresh mode active
339 * [15:14] Read/Write Queue Bypass Count
344 * [16:16] 128-bit/64-Bit
345 * 0 = 64bit Interface to DRAM
346 * 1 = 128bit Interface to DRAM
347 * [17:17] DIMM ECC Enable
348 * 0 = Some DIMMs do not have ECC
349 * 1 = ALL DIMMS have ECC bits
350 * [18:18] UnBuffered DIMMs
352 * 1 = Unbuffered DIMMS
353 * [19:19] Enable 32-Byte Granularity
354 * 0 = Optimize for 64byte bursts
355 * 1 = Optimize for 32byte bursts
356 * [20:20] DIMM 0 is x4
357 * [21:21] DIMM 1 is x4
358 * [22:22] DIMM 2 is x4
359 * [23:23] DIMM 3 is x4
361 * 1 = x4 DIMM present
362 * [24:24] Disable DRAM Receivers
363 * 0 = Receivers enabled
364 * 1 = Receivers disabled
366 * 000 = Arbiter's choice is always respected
367 * 001 = Oldest entry in DCQ can be bypassed 1 time
368 * 010 = Oldest entry in DCQ can be bypassed 2 times
369 * 011 = Oldest entry in DCQ can be bypassed 3 times
370 * 100 = Oldest entry in DCQ can be bypassed 4 times
371 * 101 = Oldest entry in DCQ can be bypassed 5 times
372 * 110 = Oldest entry in DCQ can be bypassed 6 times
373 * 111 = Oldest entry in DCQ can be bypassed 7 times
376 PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
378 (0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
379 (1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
380 (2 << 14)|(0 << 13)|(0 << 12)|
381 (0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
382 (0 << 3) |(0 << 1) |(0 << 0),
383 /* DRAM Config High Register
385 * [ 3: 0] Maximum Asynchronous Latency
390 * [11: 8] Read Preamble
408 * [18:16] Idle Cycle Limit
417 * [19:19] Dynamic Idle Cycle Center Enable
418 * 0 = Use Idle Cycle Limit
419 * 1 = Generate a dynamic Idle cycle limit
420 * [22:20] DRAM MEMCLK Frequency
430 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
431 * 0 = Disable MemClks
433 * [26:26] Memory Clock 0 Enable
436 * [27:27] Memory Clock 1 Enable
439 * [28:28] Memory Clock 2 Enable
442 * [29:29] Memory Clock 3 Enable
447 PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
448 (0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
449 (0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
450 /* DRAM Delay Line Register
452 * Adjust the skew of the input DQS strobe relative to DATA
454 * [23:16] Delay Line Adjust
455 * Adjusts the DLL derived PDL delay by one or more delay stages
456 * in either the faster or slower direction.
457 * [24:24] Adjust Slower
459 * 1 = Adj is used to increase the PDL delay
460 * [25:25] Adjust Faster
462 * 1 = Adj is used to decrease the PDL delay
465 PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
466 /* MCA NB Status Low reg */
467 PCI_ADDR(0, 0x18, 3, 0x48), 0x00f00000, 0x00000000,
468 /* MCA NB Status high reg */
469 PCI_ADDR(0, 0x18, 3, 0x4c), 0x01801e8c, 0x00000000,
470 /* MCA NB address Low reg */
471 PCI_ADDR(0, 0x18, 3, 0x50), 0x00000007, 0x00000000,
472 /* MCA NB address high reg */
473 PCI_ADDR(0, 0x18, 3, 0x54), 0xffffff00, 0x00000000,
474 /* DRAM Scrub Control Register
476 * [ 4: 0] DRAM Scrub Rate
478 * [12: 8] L2 Scrub Rate
480 * [20:16] Dcache Scrub
483 * 00000 = Do not scrub
505 * All Others = Reserved
507 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
508 /* DRAM Scrub Address Low Register
510 * [ 0: 0] DRAM Scrubber Redirect Enable
512 * 1 = Scrubber Corrects errors found in normal operation
514 * [31: 6] DRAM Scrub Address 31-6
516 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
517 /* DRAM Scrub Address High Register
519 * [ 7: 0] DRAM Scrub Address 39-32
522 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
527 if (!controller_present(ctrl)) {
528 // printk_debug("No memory controller present\n");
531 printk_spew("setting up CPU%02x northbridge registers\n", ctrl->node_id);
532 max = ARRAY_SIZE(register_values);
533 for (i = 0; i < max; i += 3) {
537 dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
538 where = register_values[i] & 0xfff;
539 reg = pci_read_config32(dev, where);
540 reg &= register_values[i+1];
541 reg |= register_values[i+2];
542 pci_write_config32(dev, where, reg);
544 printk_spew("done.\n");
547 static void hw_enable_ecc(const struct mem_controller *ctrl)
550 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
551 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
552 dcl &= ~DCL_DimmEccEn;
553 if (nbcap & NBCAP_ECC) {
554 dcl |= DCL_DimmEccEn;
556 if (CONFIG_HAVE_OPTION_TABLE &&
557 read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
558 dcl &= ~DCL_DimmEccEn;
560 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
563 static int is_dual_channel(const struct mem_controller *ctrl)
566 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
567 return dcl & DCL_128BitEn;
570 static int is_opteron(const struct mem_controller *ctrl)
572 /* Test to see if I am an Opteron.
573 * FIXME Socket 939 based Athlon64s have dual channel capability,
574 * too, so we need a better test for Opterons
576 #warning "FIXME: Implement a better test for Opterons"
578 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
579 return !!(nbcap & NBCAP_128Bit);
582 static int is_registered(const struct mem_controller *ctrl)
584 /* Test to see if we are dealing with registered SDRAM.
585 * If we are not registered we are unbuffered.
586 * This function must be called after spd_handle_unbuffered_dimms.
589 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
590 return !(dcl & DCL_UnBuffDimm);
598 #if QRANK_DIMM_SUPPORT == 1
603 static struct dimm_size spd_get_dimm_size(unsigned device)
605 /* Calculate the log base 2 size of a DIMM in bits */
612 #if QRANK_DIMM_SUPPORT == 1
616 /* Note: it might be easier to use byte 31 here; it has the DIMM size as
617 * a multiple of 4MB. The way we do it now we can size both
618 * sides of an asymmetric dimm.
620 value = spd_read_byte(device, 3); /* rows */
621 if (value < 0) goto hw_err;
622 if ((value & 0xf) == 0) goto val_err;
623 sz.side1 += value & 0xf;
624 sz.rows = value & 0xf;
626 value = spd_read_byte(device, 4); /* columns */
627 if (value < 0) goto hw_err;
628 if ((value & 0xf) == 0) goto val_err;
629 sz.side1 += value & 0xf;
630 sz.col = value & 0xf;
632 value = spd_read_byte(device, 17); /* banks */
633 if (value < 0) goto hw_err;
634 if ((value & 0xff) == 0) goto val_err;
635 sz.side1 += log2(value & 0xff);
637 /* Get the module data width and convert it to a power of two */
638 value = spd_read_byte(device, 7); /* (high byte) */
639 if (value < 0) goto hw_err;
643 low = spd_read_byte(device, 6); /* (low byte) */
644 if (low < 0) goto hw_err;
645 value = value | (low & 0xff);
646 if ((value != 72) && (value != 64)) goto val_err;
647 sz.side1 += log2(value);
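/* At this point sz.side1 is the log2 size of side 1 in bits.  As a
 * hypothetical example: 13 row bits + 10 column bits + 4 internal banks
 * (log2 = 2) + a 64-bit data width (log2 = 6) gives sz.side1 = 31,
 * i.e. 2^31 bits = 256MB on that side.
 */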
650 value = spd_read_byte(device, 5); /* number of physical banks */
651 if (value < 0) goto hw_err;
652 if (value == 1) goto out;
653 if ((value != 2) && (value != 4 )) {
656 #if QRANK_DIMM_SUPPORT == 1
660 /* Start with the symmetrical case */
663 value = spd_read_byte(device, 3); /* rows */
664 if (value < 0) goto hw_err;
665 if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
666 sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
667 sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */
669 value = spd_read_byte(device, 4); /* columns */
670 if (value < 0) goto hw_err;
671 if ((value & 0xff) == 0) goto val_err;
672 sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
673 sz.side2 += ((value >> 4) & 0x0f); /* Add in columns on side 2 */
678 die("Bad SPD value\n");
679 /* If a hardware error occurs, report that I have no memory */
685 #if QRANK_DIMM_SUPPORT == 1
693 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
695 uint32_t base0, base1;
698 if (sz.side1 != sz.side2) {
702 /* For each base register.
703 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
704 * The initial dimm size is in bits.
705 * Set the base enable bit0.
710 /* Make certain side1 of the dimm is at least 32MB */
711 if (sz.side1 >= (25 +3)) {
712 base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
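/* Example: a 512MB side has sz.side1 = 32 (2^32 bits), so
 * base0 = (1 << ((32 - 28) + 21)) | 1 = 0x02000001, i.e. bits 31-21
 * encode 16 blocks of 32MB and bit 0 is the chip-select enable.
 */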
715 /* Make certain side2 of the dimm is at least 32MB */
716 if (sz.side2 >= (25 + 3)) {
717 base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
720 /* Double the size if we are using dual channel memory */
721 if (is_dual_channel(ctrl)) {
722 base0 = (base0 << 1) | (base0 & 1);
723 base1 = (base1 << 1) | (base1 & 1);
726 /* Clear the reserved bits */
727 base0 &= ~0x001ffffe;
728 base1 &= ~0x001ffffe;
730 /* Set the appropriate DIMM base address register */
731 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
732 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
733 #if QRANK_DIMM_SUPPORT == 1
735 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
736 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
740 /* Enable the memory clocks for this DIMM */
742 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
743 dch |= DCH_MEMCLK_EN0 << index;
744 #if QRANK_DIMM_SUPPORT == 1
746 dch |= DCH_MEMCLK_EN0 << (index + 2);
749 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
753 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
755 static const unsigned cs_map_aa[] = {
756 /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
764 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
765 map &= ~(0xf << (index * 4));
766 #if QRANK_DIMM_SUPPORT == 1
768 map &= ~(0xf << ( (index + 2) * 4));
773 /* Make certain side1 of the dimm is at least 32MB */
774 if (sz.side1 >= (25 +3)) {
775 if (is_cpu_pre_d0()) {
776 map |= (sz.side1 - (25 + 3)) << (index *4);
777 #if QRANK_DIMM_SUPPORT == 1
779 map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
784 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
785 #if QRANK_DIMM_SUPPORT == 1
787 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
793 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
797 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
801 for (i = 0; i < DIMM_SOCKETS; i++) {
803 if (!(dimm_mask & (1 << i))) {
806 sz = spd_get_dimm_size(ctrl->channel0[i]);
808 return -1; /* Report SPD error */
810 set_dimm_size(ctrl, sz, i);
811 set_dimm_map (ctrl, sz, i);
816 static void route_dram_accesses(const struct mem_controller *ctrl,
817 unsigned long base_k, unsigned long limit_k)
819 /* Route the addresses to the controller node */
824 unsigned limit_reg, base_reg;
827 node_id = ctrl->node_id;
828 index = (node_id << 3);
829 limit = (limit_k << 2);
832 limit |= ( 0 << 8) | (node_id << 0);
833 base = (base_k << 2);
835 base |= (0 << 8) | (1<<1) | (1<<0);
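/* base_k and limit_k are in KB; shifting left by 2 places address bits
 * 39-24 into register bits 31-16 (KB << 10 is bytes, and >> 24 << 16 is
 * the same as KB << 2).  Bits 1-0 of the base are the read/write enables
 * and bits 2-0 of the limit select the destination node.
 */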
837 limit_reg = 0x44 + index;
838 base_reg = 0x40 + index;
839 for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
840 pci_write_config32(device, limit_reg, limit);
841 pci_write_config32(device, base_reg, base);
845 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
847 /* Error if I don't have memory */
852 /* Report the amount of memory. */
853 printk_debug("RAM end at 0x%08x kB\n", tom_k);
855 /* Now set top of memory */
857 if (tom_k > (4*1024*1024)) {
858 printk_spew("Handling memory mapped above 4 GB\n");
859 printk_spew("Upper RAM end at 0x%08x kB\n", tom_k);
860 msr.lo = (tom_k & 0x003fffff) << 10;
861 msr.hi = (tom_k & 0xffc00000) >> 22;
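/* tom_k is in KB, so the byte address is tom_k << 10: the low 22 bits of
 * tom_k become MSR bits 31-10 and the remaining bits go into the high
 * dword before TOP_MEM2 is written below.
 */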
862 wrmsr(TOP_MEM2, msr);
863 printk_spew("Correcting memory amount mapped below 4 GB\n");
866 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
867 * so I can see my rom chip and other I/O devices.
869 if (tom_k >= 0x003f0000) {
870 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
871 if (hole_startk != 0) {
876 printk_spew("Adjusting lower RAM end\n");
878 printk_spew("Lower RAM end at 0x%08x kB\n", tom_k);
879 msr.lo = (tom_k & 0x003fffff) << 10;
880 msr.hi = (tom_k & 0xffc00000) >> 22;
884 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
887 static const uint8_t csbase_low_shift[] = {
890 /* 128MB */ (14 - 4),
891 /* 256MB */ (15 - 4),
892 /* 512MB */ (15 - 4),
897 static const uint8_t csbase_low_d0_shift[] = {
900 /* 128MB */ (14 - 4),
901 /* 128MB */ (15 - 4),
902 /* 256MB */ (15 - 4),
903 /* 512MB */ (15 - 4),
904 /* 256MB */ (16 - 4),
905 /* 512MB */ (16 - 4),
911 /* cs_base_high is not changed */
914 int chip_selects, index;
916 unsigned common_size;
917 unsigned common_cs_mode;
918 uint32_t csbase, csmask;
920 /* See if all of the memory chip selects are the same size
921 * and if so count them.
926 for (index = 0; index < 8; index++) {
931 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
939 if (common_size == 0) {
942 /* The size differed, fail */
943 if (common_size != size) {
947 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
948 cs_mode =( value >> ((index>>1)*4)) & 0xf;
949 if (cs_mode == 0 ) continue;
950 if (common_cs_mode == 0) {
951 common_cs_mode = cs_mode;
953 /* The cs_mode differed, fail */
954 if (common_cs_mode != cs_mode) {
959 /* Chip selects can only be interleaved when there is
960 * more than one and there is a power of two of them.
962 bits = log2(chip_selects);
963 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
967 /* Find the bits of csbase that we need to interleave on */
968 if (is_cpu_pre_d0()){
969 csbase_inc = 1 << csbase_low_shift[common_cs_mode];
970 if (is_dual_channel(ctrl)) {
971 /* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
972 if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
973 // printk_debug("8 4GB chip selects cannot be interleaved\n");
980 csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
981 if (is_dual_channel(ctrl)) {
982 if ( (bits==3) && (common_cs_mode > 8)) {
983 // printk_debug("8 cs_mode>8 chip selects cannot be interleaved\n");
990 /* Compute the initial values for csbase and csmask.
991 * In csbase just set the enable bit and the base to zero.
992 * In csmask set the mask bits for the size and page level interleave.
995 csmask = (((common_size << bits) - 1) << 21);
996 csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
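/* The mask now covers the combined size of all interleaved chip selects
 * above bit 21, while the low-order mask bits (address 19-13) are set
 * except for the ones consumed by the interleave, so those address bits
 * still take part in chip-select decode; each chip select below gets a
 * csbase that differs only in those bits (csbase += csbase_inc).
 */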
997 for (index = 0; index < 8; index++) {
1000 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1001 /* Is it enabled? */
1005 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1006 pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
1007 csbase += csbase_inc;
1010 printk_spew("Interleaved\n");
1012 /* Return the memory size in K */
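/* common_size comes from csbase bits 31-21 and is therefore in 32MB
 * units (32MB = 1 << 15 KB); << 15 converts to KB and the extra << bits
 * accounts for the number of interleaved chip selects.
 */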
1013 return common_size << (15 + bits);
1016 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1020 /* Remember which registers we have used in the high 8 bits of tom */
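/* The low 24 bits of tom accumulate the top of memory in 32MB units
 * (csbase bits 31-21); the final return converts them to KB with << 15.
 */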
1023 /* Find the largest remaining candidate */
1024 unsigned index, candidate;
1025 uint32_t csbase, csmask;
1029 for (index = 0; index < 8; index++) {
1031 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1033 /* Is it enabled? */
1038 /* Is it greater? */
1039 if (value <= csbase) {
1043 /* Has it already been selected */
1044 if (tom & (1 << (index + 24))) {
1047 /* I have a new candidate */
1052 /* See if I have found a new candidate */
1057 /* Remember the dimm size */
1058 size = csbase >> 21;
1060 /* Remember I have used this register */
1061 tom |= (1 << (candidate + 24));
1063 /* Recompute the cs base register value */
1064 csbase = (tom << 21) | 1;
1066 /* Increment the top of memory */
1069 /* Compute the memory mask */
1070 csmask = ((size -1) << 21);
1071 csmask |= 0xfe00; /* For now don't optimize */
1073 /* Write the new base register */
1074 pci_write_config32(ctrl->f2, DRAM_CSBASE + (candidate << 2), csbase);
1075 /* Write the new mask register */
1076 pci_write_config32(ctrl->f2, DRAM_CSMASK + (candidate << 2), csmask);
1079 /* Return the memory size in K */
1080 return (tom & ~0xff000000) << 15;
1083 unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1087 /* Find the last memory address used */
1089 for (node_id = 0; node_id < max_node_id; node_id++) {
1090 uint32_t limit, base;
1092 index = node_id << 3;
1093 base = pci_read_config32(ctrl->f1, 0x40 + index);
1094 /* Only look at the limit if the base is enabled */
1095 if ((base & 3) == 3) {
1096 limit = pci_read_config32(ctrl->f1, 0x44 + index);
1097 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
1103 static void order_dimms(const struct mem_controller *ctrl)
1105 unsigned long tom_k, base_k;
1107 if ((!CONFIG_HAVE_OPTION_TABLE) ||
1108 read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1109 tom_k = interleave_chip_selects(ctrl);
1111 printk_debug("Interleaving disabled\n");
1116 tom_k = order_chip_selects(ctrl);
1119 /* Compute the memory base address */
1120 base_k = memory_end_k(ctrl, ctrl->node_id);
1122 route_dram_accesses(ctrl, base_k, tom_k);
1123 set_top_mem(tom_k, 0);
1126 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1128 printk_debug("disabling dimm %02x\n", index);
1129 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1130 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1131 dimm_mask &= ~(1 << index);
1135 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
1141 int has_dualch = is_opteron(ctrl);
1145 for (i = 0; (i < DIMM_SOCKETS); i++) {
1147 if (!(dimm_mask & (1 << i))) {
1150 value = spd_read_byte(ctrl->channel0[i], 21);
1155 /* Registered dimm ? */
1156 if (value & (1 << 1)) {
1159 /* Otherwise it must be an unbuffered dimm */
1164 if (unbuffered && registered) {
1165 die("Mixed buffered and registered dimms not supported");
1168 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1169 dcl &= ~DCL_UnBuffDimm;
1171 if ((has_dualch) && (!is_cpu_pre_d0())) {
1172 dcl |= DCL_UnBuffDimm; /* set DCL_DualDIMMen too? */
1174 /* set DCL_En2T if you have non-equal DDR mem types! */
1176 if ((cpuid_eax(1) & 0x30) == 0x30) {
1177 /* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
1178 dcl |= DCL_UpperCSMap;
1181 dcl |= DCL_UnBuffDimm;
1184 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1186 if (is_registered(ctrl)) {
1187 printk_spew("Registered\n");
1189 printk_spew("Unbuffered\n");
1195 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1200 for (i = 0; i < DIMM_SOCKETS; i++) {
1203 device = ctrl->channel0[i];
1205 byte = spd_read_byte(ctrl->channel0[i], 2); /* Type */
1207 dimm_mask |= (1 << i);
1210 device = ctrl->channel1[i];
1212 byte = spd_read_byte(ctrl->channel1[i], 2);
1214 dimm_mask |= (1 << (i + DIMM_SOCKETS));
1221 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
1225 /* SPD addresses to verify are identical */
1226 static const uint8_t addresses[] = {
1227 2, /* Type should be DDR SDRAM */
1228 3, /* *Row addresses */
1229 4, /* *Column addresses */
1230 5, /* *Physical Banks */
1231 6, /* *Module Data Width low */
1232 7, /* *Module Data Width high */
1233 9, /* *Cycle time at highest CAS Latency CL=X */
1234 11, /* *SDRAM Type */
1235 13, /* *SDRAM Width */
1236 17, /* *Logical Banks */
1237 18, /* *Supported CAS Latencies */
1238 21, /* *SDRAM Module Attributes */
1239 23, /* *Cycle time at CAS Latency (CLX - 0.5) */
1240 26, /* *Cycle time at CAS Latency (CLX - 1.0) */
1241 27, /* *tRP Row precharge time */
1242 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1243 29, /* *tRCD RAS to CAS */
1244 30, /* *tRAS Activate to Precharge */
1245 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1246 42, /* *Minimum Auto Refresh Command Time(Trfc) */
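/* For 128-bit operation the paired DIMMs on channel0/channel1 must be
 * identical; the loop below compares the SPD bytes listed above and any
 * mismatch falls back to a single 64-bit channel.
 */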
1248 /* If the dimms are not in pairs do not do dual channels */
1249 if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1250 ((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1251 goto single_channel;
1253 /* If the cpu is not capable of doing dual channels don't do dual channels */
1254 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1255 if (!(nbcap & NBCAP_128Bit)) {
1256 goto single_channel;
1258 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1259 unsigned device0, device1;
1262 /* If I don't have a dimm skip this one */
1263 if (!(dimm_mask & (1 << i))) {
1266 device0 = ctrl->channel0[i];
1267 device1 = ctrl->channel1[i];
1268 for (j = 0; j < ARRAY_SIZE(addresses); j++) {
1270 addr = addresses[j];
1271 value0 = spd_read_byte(device0, addr);
1275 value1 = spd_read_byte(device1, addr);
1279 if (value0 != value1) {
1280 goto single_channel;
1284 printk_spew("Enabling dual channel memory\n");
1286 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1287 dcl &= ~DCL_32ByteEn;
1288 dcl |= DCL_128BitEn;
1289 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1292 dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
1298 uint8_t divisor; /* In 1/2 ns increments */
1301 uint32_t dch_memclk;
1302 uint16_t dch_tref4k, dch_tref8k;
1305 uint8_t dtl_trwt[3][3]; /* first index is CAS_LAT 2/2.5/3 and 128/registered64/64 */
1306 uint8_t rdpreamble[4]; /* 0 is for registered, 1 for 1-2 DIMMS, 2 and 3 for 3 or 4 unreg dimm slots */
1310 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1312 static const struct mem_param speed[] = {
1316 .divisor = (10 <<1),
1319 .dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1320 .dch_tref4k = DTH_TREF_100MHZ_4K,
1321 .dch_tref8k = DTH_TREF_100MHZ_8K,
1324 .dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1325 .rdpreamble = { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
1330 .divisor = (7<<1)+1,
1333 .dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1334 .dch_tref4k = DTH_TREF_133MHZ_4K,
1335 .dch_tref8k = DTH_TREF_133MHZ_8K,
1338 .dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1339 .rdpreamble = { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
1347 .dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1348 .dch_tref4k = DTH_TREF_166MHZ_4K,
1349 .dch_tref8k = DTH_TREF_166MHZ_8K,
1352 .dtl_trwt = { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
1353 .rdpreamble = { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
1361 .dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1362 .dch_tref4k = DTH_TREF_200MHZ_4K,
1363 .dch_tref8k = DTH_TREF_200MHZ_8K,
1366 .dtl_trwt = { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1367 .rdpreamble = { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
1373 const struct mem_param *param;
1374 for (param = &speed[0]; param->cycle_time ; param++) {
1375 if (min_cycle_time > (param+1)->cycle_time) {
1379 if (!param->cycle_time) {
1380 die("min_cycle_time to low");
1382 printk_spew("%s\n", param->name);
1386 struct spd_set_memclk_result {
1387 const struct mem_param *param;
1390 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1392 /* Compute the minimum cycle time for these dimms */
1393 struct spd_set_memclk_result result;
1394 unsigned min_cycle_time, min_latency, bios_cycle_time;
1398 static const uint8_t latency_indicies[] = { 26, 23, 9 };
1399 static const unsigned char min_cycle_times[] = {
1400 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1401 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1402 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1403 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
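/* SPD DDR cycle times encode whole nanoseconds in the upper nibble and
 * tenths in the lower nibble (0x75 = 7.5ns), so the table above can be
 * compared directly against SPD bytes 9/23/26.
 */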
1406 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1408 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1409 bios_cycle_time = min_cycle_times[
1410 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1411 if (CONFIG_HAVE_OPTION_TABLE && bios_cycle_time > min_cycle_time) {
1412 min_cycle_time = bios_cycle_time;
1416 /* Compute the least latency with the fastest clock supported
1417 * by both the memory controller and the dimms.
1419 for (i = 0; i < DIMM_SOCKETS; i++) {
1420 int new_cycle_time, new_latency;
1425 if (!(dimm_mask & (1 << i))) {
1429 /* First find the supported CAS latencies
1430 * Byte 18 for DDR SDRAM is interpreted:
1431 * bit 0 == CAS Latency = 1.0
1432 * bit 1 == CAS Latency = 1.5
1433 * bit 2 == CAS Latency = 2.0
1434 * bit 3 == CAS Latency = 2.5
1435 * bit 4 == CAS Latency = 3.0
1436 * bit 5 == CAS Latency = 3.5
1440 new_cycle_time = 0xa0;
1443 latencies = spd_read_byte(ctrl->channel0[i], 18);
1444 if (latencies <= 0) continue;
1446 /* Compute the lowest cas latency supported */
1447 latency = log2(latencies) -2;
1449 /* Loop through and find a fast clock with a low latency */
1450 for (index = 0; index < 3; index++, latency++) {
1452 if ((latency < 2) || (latency > 4) ||
1453 (!(latencies & (1 << latency)))) {
1456 spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1457 if (spd_value < 0) {
1461 /* Only increase the latency if we decrease the clock */
1462 if ((spd_value >= min_cycle_time) && (spd_value < new_cycle_time)) {
1463 new_cycle_time = spd_value;
1464 new_latency = latency;
1467 if (new_latency > 4){
1470 /* Does min_cycle_time need to be increased? */
1471 if (new_cycle_time > min_cycle_time) {
1472 min_cycle_time = new_cycle_time;
1474 /* Does min_latency need to be increased? */
1475 if (new_latency > min_latency) {
1476 min_latency = new_latency;
1479 /* Make a second pass through the dimms and disable
1480 * any that cannot support the selected memclk and cas latency.
1483 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1488 if (!(dimm_mask & (1 << i))) {
1492 latencies = spd_read_byte(ctrl->channel0[i], 18);
1493 if (latencies < 0) goto hw_error;
1494 if (latencies == 0) {
1498 /* Compute the lowest cas latency supported */
1499 latency = log2(latencies) -2;
1501 /* Walk through searching for the selected latency */
1502 for (index = 0; index < 3; index++, latency++) {
1503 if (!(latencies & (1 << latency))) {
1506 if (latency == min_latency)
1509 /* If I can't find the latency or my index is bad, error */
1510 if ((latency != min_latency) || (index >= 3)) {
1514 /* Read the min_cycle_time for this latency */
1515 spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1516 if (spd_value < 0) goto hw_error;
1518 /* All is good if the selected clock speed
1519 * is what I need or slower.
1521 if (spd_value <= min_cycle_time) {
1524 /* Otherwise I have an error, disable the dimm */
1526 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1529 // Reduce speed for full-load four-rank support
1530 #if QRANK_DIMM_SUPPORT
1531 if (dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1533 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1535 if (!(dimm_mask & (1 << i))) {
1538 val = spd_read_byte(ctrl->channel0[i], 5);
1545 if (min_cycle_time <= 0x50 ) {
1546 min_cycle_time = 0x60;
1553 /* Now that I know the minimum cycle time, look up the memory parameters */
1554 result.param = get_mem_param(min_cycle_time);
1556 /* Update DRAM Config High with our selected memory speed */
1557 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1558 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1560 /* Improves DQS centering by correcting for the case where the core speed
 * multiplier and MEMCLK speed result in an odd clock divisor, by selecting the
 * next lowest memory speed.  Required only at DDR400 and higher speeds with
 * certain DIMM loadings ---- cheating??? */
1561 if (!is_cpu_pre_e0()) {
1562 if (min_cycle_time==0x50) {
1568 value |= result.param->dch_memclk;
1569 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1571 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1573 /* Update DRAM Timing Low with our selected cas latency */
1574 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1575 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1576 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1577 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1579 result.dimm_mask = dimm_mask;
1582 result.param = (const struct mem_param *)0;
1583 result.dimm_mask = -1;
1588 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1590 unsigned clocks, old_clocks;
1593 value = spd_read_byte(ctrl->channel0[i], 41);
1594 if (value < 0) return -1;
1595 if ((value == 0) || (value == 0xff)) {
1598 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
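/* SPD byte 41 gives tRC in whole ns; value << 1 converts it to half-ns
 * units so it can be divided by the MEMCLK period (param->divisor, also
 * in half-ns), rounding up to whole clocks.
 */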
1599 if (clocks < DTL_TRC_MIN) {
1600 clocks = DTL_TRC_MIN;
1602 if (clocks > DTL_TRC_MAX) {
1606 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1607 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1608 if (old_clocks > clocks) {
1609 clocks = old_clocks;
1611 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1612 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1613 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1617 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1619 unsigned clocks, old_clocks;
1622 value = spd_read_byte(ctrl->channel0[i], 42);
1623 if (value < 0) return -1;
1624 if ((value == 0) || (value == 0xff)) {
1625 value = param->tRFC;
1627 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1628 if (clocks < DTL_TRFC_MIN) {
1629 clocks = DTL_TRFC_MIN;
1631 if (clocks > DTL_TRFC_MAX) {
1634 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1635 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
1636 if (old_clocks > clocks) {
1637 clocks = old_clocks;
1639 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1640 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1641 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1646 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1648 unsigned clocks, old_clocks;
1651 value = spd_read_byte(ctrl->channel0[i], 29);
1652 if (value < 0) return -1;
1653 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
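/* SPD byte 29 gives tRCD in quarter-ns units, which is why the MEMCLK
 * period (param->divisor, in half-ns) is doubled before the round-up
 * divide.
 */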
1654 if (clocks < DTL_TRCD_MIN) {
1655 clocks = DTL_TRCD_MIN;
1657 if (clocks > DTL_TRCD_MAX) {
1660 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1661 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
1662 if (old_clocks > clocks) {
1663 clocks = old_clocks;
1665 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1666 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1667 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1671 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1673 unsigned clocks, old_clocks;
1676 value = spd_read_byte(ctrl->channel0[i], 28);
1677 if (value < 0) return -1;
1678 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1679 if (clocks < DTL_TRRD_MIN) {
1680 clocks = DTL_TRRD_MIN;
1682 if (clocks > DTL_TRRD_MAX) {
1685 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1686 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
1687 if (old_clocks > clocks) {
1688 clocks = old_clocks;
1690 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1691 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1692 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1696 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1698 unsigned clocks, old_clocks;
1701 value = spd_read_byte(ctrl->channel0[i], 30);
1702 if (value < 0) return -1;
1703 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1704 if (clocks < DTL_TRAS_MIN) {
1705 clocks = DTL_TRAS_MIN;
1707 if (clocks > DTL_TRAS_MAX) {
1710 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1711 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1712 if (old_clocks > clocks) {
1713 clocks = old_clocks;
1715 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1716 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1717 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1721 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1723 unsigned clocks, old_clocks;
1726 value = spd_read_byte(ctrl->channel0[i], 27);
1727 if (value < 0) return -1;
1728 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1729 if (clocks < DTL_TRP_MIN) {
1730 clocks = DTL_TRP_MIN;
1732 if (clocks > DTL_TRP_MAX) {
1735 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1736 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
1737 if (old_clocks > clocks) {
1738 clocks = old_clocks;
1740 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1741 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1742 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1746 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1749 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1750 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1751 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1752 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1756 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1759 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1760 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1761 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1762 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1765 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1769 unsigned tref, old_tref;
1770 value = spd_read_byte(ctrl->channel0[i], 3);
1771 if (value < 0) return -1;
1774 tref = param->dch_tref8k;
1776 tref = param->dch_tref4k;
1779 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1780 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
1781 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1782 tref = param->dch_tref4k;
1784 tref = param->dch_tref8k;
1786 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1787 dth |= (tref << DTH_TREF_SHIFT);
1788 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1793 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1797 #if QRANK_DIMM_SUPPORT == 1
1801 value = spd_read_byte(ctrl->channel0[i], 13);
1806 #if QRANK_DIMM_SUPPORT == 1
1807 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
1813 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1814 #if QRANK_DIMM_SUPPORT == 1
1816 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1819 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1824 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1828 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1832 value = spd_read_byte(ctrl->channel0[i], 11);
1837 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1838 dcl &= ~DCL_DimmEccEn;
1839 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1844 static int count_dimms(const struct mem_controller *ctrl)
1849 for (index = 0; index < 8; index += 2) {
1851 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
1859 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1863 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1864 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1865 dth |= ((param->dtl_twtr - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1866 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1869 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
1877 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1878 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1880 if (is_opteron(ctrl)) {
1881 mtype = 0; /* dual channel */
1882 } else if (is_registered(ctrl)) {
1883 mtype = 1; /* registered 64bit interface */
1885 mtype = 2; /* unbuffered 64bit interface */
1899 die("Unknown LAT for Trwt");
1902 clocks = param->dtl_trwt[lat][mtype];
1903 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1904 die("Unknown Trwt\n");
1907 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1908 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1909 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1910 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1914 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1916 /* Memory Clocks after CAS# */
1919 if (is_registered(ctrl)) {
1924 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1925 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1926 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1927 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1931 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1934 unsigned rdpreamble;
1939 for (i = 0; i < 4; i++) {
1940 if (ctrl->channel0[i]) {
1945 /* map to an index into the param.rdpreamble array */
1946 if (is_registered(ctrl)) {
1948 } else if (slots < 3) {
1950 } else if (slots == 3) {
1952 } else if (slots == 4) {
1955 die("Unknown rdpreamble for this nr of slots");
1958 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1959 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
1960 rdpreamble = param->rdpreamble[i];
1962 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
1963 die("Unknown rdpreamble");
1966 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
1967 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
1970 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
1976 dimms = count_dimms(ctrl);
1978 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1979 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
1981 if (is_registered(ctrl)) {
1993 die("Too many unbuffered dimms");
1995 else if (dimms == 3) {
2004 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2005 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2008 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2011 /* AMD says to Hardcode this */
2012 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2013 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2014 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2015 dch |= DCH_DYN_IDLE_CTR_EN;
2016 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2019 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2023 init_Tref(ctrl, param);
2024 for (i = 0; i < DIMM_SOCKETS; i++) {
2026 if (!(dimm_mask & (1 << i))) {
2029 /* DRAM Timing Low Register */
2030 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2031 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2032 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2033 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2034 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2035 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2037 /* DRAM Timing High Register */
2038 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2041 /* DRAM Config Low */
2042 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2043 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
2049 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2051 /* DRAM Timing Low Register */
2052 set_Twr(ctrl, param);
2054 /* DRAM Timing High Register */
2055 set_Twtr(ctrl, param);
2056 set_Trwt(ctrl, param);
2057 set_Twcl(ctrl, param);
2059 /* DRAM Config High */
2060 set_read_preamble(ctrl, param);
2061 set_max_async_latency(ctrl, param);
2062 set_idle_cycle_limit(ctrl, param);
2066 #if RAMINIT_SYSINFO==1
2067 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
2069 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2072 struct spd_set_memclk_result result;
2073 const struct mem_param *param;
2076 if (!controller_present(ctrl)) {
2077 // printk_debug("No memory controller present\n");
2081 hw_enable_ecc(ctrl);
2082 activate_spd_rom(ctrl);
2083 dimm_mask = spd_detect_dimms(ctrl);
2084 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2085 printk_debug("No memory for this cpu\n");
2088 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2091 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2094 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
2097 result = spd_set_memclk(ctrl, dimm_mask);
2098 param = result.param;
2099 dimm_mask = result.dimm_mask;
2102 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2108 /* Unrecoverable error reading SPD data */
2109 printk_err("SPD error - reset\n");
2114 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
2115 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2118 uint32_t carry_over;
2120 uint32_t base, limit;
2125 carry_over = (4*1024*1024) - hole_startk;
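/* carry_over is the amount of DRAM (in KB) displaced by the hole below
 * 4GB; the base/limit of every node above the hole is shifted up by this
 * amount (<< 2 converts KB to the register's bits-39-24 format).
 */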
2127 for (ii=controllers - 1;ii>i;ii--) {
2128 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2129 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2132 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
2133 for (j = 0; j < controllers; j++) {
2134 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2135 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
2138 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2139 for (j = 0; j < controllers; j++) {
2140 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2143 base = pci_read_config32(dev, 0x40 + (i << 3));
2144 basek = (base & 0xffff0000) >> 2;
2145 if (basek == hole_startk) {
2146 // No need to set the memhole here, because the hole offset would be 0 (overflow),
2147 // so change the base reg instead; the new basek will be 4*1024*1024.
2149 base |= (4*1024*1024)<<2;
2150 for (j = 0; j < controllers; j++) {
2151 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
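/* F1 offset 0xf0 is the K8 DRAM Hole Address register: bits 31-24 hold
 * the hole base (address bits 31-24), bits 15-8 hold the offset added to
 * accesses above the hole, and bit 0 is DramHoleValid.
 */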
2155 hoist = /* hole start address */
2156 ((hole_startk << 10) & 0xff000000) +
2157 /* hole address to memory controller address */
2158 (((basek + carry_over) >> 6) & 0x0000ff00) +
2161 pci_write_config32(dev, 0xf0, hoist);
2167 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2170 uint32_t hole_startk;
2173 hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK;
2175 printk_spew("Handling memory hole at 0x%08x (default)\n", hole_startk);
2176 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
2177 /* We need to double check if hole_startk is valid.
2178 * If it is equal to the dram base address in K (base_k),
2179 * we need to decrease it.
2182 for (i=0; i<controllers; i++) {
2185 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2186 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2189 base_k = (base & 0xffff0000) >> 2;
2190 if (base_k == hole_startk) {
2191 /* decrease memory hole startk to make sure it is
2192 * in the middle of the previous node
2194 hole_startk -= (base_k - basek_pri)>>1;
2195 break; /* only one hole */
2200 printk_spew("Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
2202 /* Find node number that needs the memory hole configured */
2203 for (i=0; i<controllers; i++) {
2204 uint32_t base, limit;
2205 unsigned base_k, limit_k;
2206 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2207 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2210 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2211 base_k = (base & 0xffff0000) >> 2;
2212 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2213 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2215 hoist_memory(controllers, ctrl, hole_startk, i);
2216 end_k = memory_end_k(ctrl, controllers);
2217 set_top_mem(end_k, hole_startk);
2218 break; /* only one hole */
2226 #define TIMEOUT_LOOPS 300000
2227 #if RAMINIT_SYSINFO == 1
2228 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
2230 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2235 /* Error if I don't have memory */
2236 if (memory_end_k(ctrl, controllers) == 0) {
2240 /* Before enabling memory start the memory clocks */
2241 for (i = 0; i < controllers; i++) {
2243 if (!controller_present(ctrl + i))
2245 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2246 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2247 dch |= DCH_MEMCLK_VALID;
2248 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2251 /* Disable dram receivers */
2253 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2254 dcl |= DCL_DisInRcvrs;
2255 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2259 /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
2260 /* And if necessary toggle the reset on the dimms by hand */
2261 memreset(controllers, ctrl);
2263 for (i = 0; i < controllers; i++) {
2265 if (!controller_present(ctrl + i))
2267 /* Skip everything if I don't have any memory on this controller */
2268 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2269 if (!(dch & DCH_MEMCLK_VALID)) {
2273 /* Toggle DisDqsHys to get it working */
2274 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2275 if (dcl & DCL_DimmEccEn) {
2277 printk_spew("ECC enabled\n");
2278 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
2280 if (dcl & DCL_128BitEn) {
2281 mnc |= MNC_CHIPKILL_EN;
2283 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2285 dcl |= DCL_DisDqsHys;
2286 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2287 dcl &= ~DCL_DisDqsHys;
2288 dcl &= ~DCL_DLL_Disable;
2291 dcl |= DCL_DramInit;
2292 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2295 for (i = 0; i < controllers; i++) {
2297 if (!controller_present(ctrl + i))
2299 /* Skip everything if I don't have any memory on this controller */
2300 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2301 if (!(dch & DCH_MEMCLK_VALID)) {
2305 printk_debug("Initializing memory: ");
2308 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2310 if ((loops & 1023) == 0) {
2313 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2314 if (loops >= TIMEOUT_LOOPS) {
2315 printk_debug(" failed\n");
2319 if (!is_cpu_pre_c0()) {
2320 /* Wait until it is safe to touch memory */
2321 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2322 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2324 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2325 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2328 printk_debug(" done\n");
2331 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
2332 // init hw mem hole here
2333 /* The DramHoleValid bit can only be set after MemClrStatus is set by hardware */
2334 if (!is_cpu_pre_e0())
2335 set_hw_mem_hole(controllers, ctrl);
2338 //FIXME add enable node interleaving here -- yhlu
2340 1. check how many nodes we have; if not all have ram installed, get out
2341 2. check that cs_base lo is 0 on node 0 (f2 0x40,...); if any one is not using lo as CS_BASE, get out
2342 3. check that the other nodes match node 0 for f2 0x40,...; otherwise get out
2343 4. if all are ready, enable node_interleaving in f1 0x40... of every node
2344 5. for node interleaving we need to set the mem hole on every node (need to recalculate the hole offset in f0 for every node)
2349 static void set_sysinfo_in_ram(unsigned val)
2353 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
2354 const uint16_t *spd_addr)
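/* spd_addr holds 2 * DIMM_SOCKETS SMBus addresses per node: the first
 * DIMM_SOCKETS entries of each pair are channel 0, the next DIMM_SOCKETS
 * entries are channel 1.
 */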
2358 struct mem_controller *ctrl;
2359 for (i=0;i<controllers; i++) {
2362 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2363 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2364 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2365 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2367 if (spd_addr == (void *)0) continue;
2369 for (j=0;j<DIMM_SOCKETS;j++) {
2370 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2371 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];