1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
7 #include <cpu/x86/mem.h>
8 #include <cpu/x86/cache.h>
9 #include <cpu/x86/mtrr.h>
13 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
14 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
17 #ifndef K8_4RANK_DIMM_SUPPORT
18 #define K8_4RANK_DIMM_SUPPORT 0
22 static void setup_resource_map(const unsigned int *register_values, int max)
25 // print_debug("setting up resource map....");
29 for(i = 0; i < max; i += 3) {
35 print_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
37 print_debug_hex32(register_values[i]);
39 print_debug_hex32(register_values[i+2]);
43 dev = register_values[i] & ~0xff;
44 where = register_values[i] & 0xff;
45 reg = pci_read_config32(dev, where);
46 reg &= register_values[i+1];
47 reg |= register_values[i+2];
48 pci_write_config32(dev, where, reg);
50 reg = pci_read_config32(register_values[i]);
51 reg &= register_values[i+1];
52 reg |= register_values[i+2] & ~register_values[i+1];
53 pci_write_config32(register_values[i], reg);
56 // print_debug("done.\r\n");
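/* The check below reads PCI config offset 0, which holds the device ID in
 * the high 16 bits and the vendor ID in the low 16 bits. 0x11001022 is
 * device 0x1100 (the K8 HyperTransport configuration function) from
 * vendor 0x1022 (AMD).
 */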
60 static int controller_present(const struct mem_controller *ctrl)
62 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
65 static void sdram_set_registers(const struct mem_controller *ctrl)
67 static const unsigned int register_values[] = {
69 /* Be careful to set the limit registers before the base registers, which contain the enables */
70 /* DRAM Limit i Registers
79 * [ 2: 0] Destination Node ID
89 * [10: 8] Interleave select
90 * specifies the values of A[14:12] to use with interleave enable.
92 * [31:16] DRAM Limit Address i Bits 39-24
93 * This field defines the upper address bits of a 40 bit address
94 * that define the end of the DRAM region.
96 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
97 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
98 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
99 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
100 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
101 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
102 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
103 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
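	/* Worked example: a node owning DRAM up to 1GB-1 (0x3fffffff) has
	 * address bits 39-24 equal to 0x003f, so its limit register would be
	 * (0x003f << 16) | destination node ID.
	 */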
104 /* DRAM Base i Registers
113 * [ 0: 0] Read Enable
116 * [ 1: 1] Write Enable
117 * 0 = Writes Disabled
120 * [10: 8] Interleave Enable
121 * 000 = No interleave
122 * 001 = Interleave on A[12] (2 nodes)
124 * 011 = Interleave on A[12] and A[14] (4 nodes)
128 * 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
130 * [31:16] DRAM Base Address i Bits 39-24
131 * This field defines the upper address bits of a 40-bit address
132 * that define the start of the DRAM region.
134 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
135 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
136 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
137 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
138 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
139 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
140 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
141 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
143 /* DRAM CS Base Address i Registers
152 * [ 0: 0] Chip-Select Bank Enable
156 * [15: 9] Base Address (19-13)
157 * An optimization used when all DIMMs are the same size...
159 * [31:21] Base Address (35-25)
160 * This field defines the top 11 address bits of a 40-bit
161 * address that define the memory address space. These
162 * bits decode 32-MByte blocks of memory.
164 PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
165 PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
166 PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
167 PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
168 PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
169 PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
170 PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
171 PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
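	/* Worked example: a 256MB chip-select bank starting at 512MB has base
	 * address 0x20000000; bits 35-25 of that are 0x010, so the register
	 * would be (0x010 << 21) | 1 (enable) = 0x02000001.
	 */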
172 /* DRAM CS Mask Address i Registers
181 * Select bits to exclude from comparison with the DRAM Base address register.
183 * [15: 9] Address Mask (19-13)
184 * Address to be excluded from the optimized case
186 * [29:21] Address Mask (33-25)
187 * The bits with an address mask of 1 are excluded from address comparison
191 PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
192 PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
193 PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
194 PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
195 PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
196 PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
197 PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
198 PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
199 /* DRAM Bank Address Mapping Register
201 * Specify the memory module size
206 * 000 = 32Mbyte (Rows = 12 & Col = 8)
207 * 001 = 64Mbyte (Rows = 12 & Col = 9)
208 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
209 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
210 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
211 * 101 = 1Gbyte (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
212 * 110 = 2Gbyte (Rows = 14 & Col = 12)
219 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
220 /* DRAM Timing Low Register
222 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
232 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
233 * 0000 = 7 bus clocks
234 * 0001 = 8 bus clocks
236 * 1110 = 21 bus clocks
237 * 1111 = 22 bus clocks
238 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
239 * 0000 = 9 bus clocks
240 * 0001 = 10 bus clocks
242 * 1110 = 23 bus clocks
243 * 1111 = 24 bus clocks
244 * [14:12] Trcd (Ras#-active to Cas#-read/write Delay)
254 * [18:16] Trrd (Ras# to Ras# Delay)
264 * [23:20] Tras (Minimum Ras# Active Time)
265 * 0000 to 0100 = reserved
266 * 0101 = 5 bus clocks
268 * 1111 = 15 bus clocks
269 * [26:24] Trp (Row Precharge Time)
279 * [28:28] Twr (Write Recovery Time)
284 PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
285 /* DRAM Timing High Register
287 * [ 0: 0] Twtr (Write to Read Delay)
291 * [ 6: 4] Trwt (Read to Write Delay)
301 * [12: 8] Tref (Refresh Rate)
302 * 00000 = 100Mhz 4K rows
303 * 00001 = 133Mhz 4K rows
304 * 00010 = 166Mhz 4K rows
305 * 00011 = 200Mhz 4K rows
306 * 01000 = 100Mhz 8K/16K rows
307 * 01001 = 133Mhz 8K/16K rows
308 * 01010 = 166Mhz 8K/16K rows
309 * 01011 = 200Mhz 8K/16K rows
311 * [22:20] Twcl (Write CAS Latency)
312 * 000 = 1 Mem clock after CAS# (Unbuffered Dimms)
313 * 001 = 2 Mem clocks after CAS# (Registered Dimms)
316 PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
317 /* DRAM Config Low Register
319 * [ 0: 0] DLL Disable
328 * [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
329 * 0 = Enable DQS input filter
330 * 1 = Disable DQS input filtering
333 * 0 = Initialization done or not yet started.
334 * 1 = Initiate DRAM initialization sequence
335 * [ 9: 9] SO-Dimm Enable
337 * 1 = SO-Dimms present
339 * 0 = DRAM not enabled
340 * 1 = DRAM initialized and enabled
341 * [11:11] Memory Clear Status
342 * 0 = Memory Clear function has not completed
343 * 1 = Memory Clear function has completed
344 * [12:12] Exit Self-Refresh
345 * 0 = Exit from self-refresh done or not yet started
346 * 1 = DRAM exiting from self refresh
347 * [13:13] Self-Refresh Status
348 * 0 = Normal Operation
349 * 1 = Self-refresh mode active
350 * [15:14] Read/Write Queue Bypass Count
355 * [16:16] 128-bit/64-Bit
356 * 0 = 64bit Interface to DRAM
357 * 1 = 128bit Interface to DRAM
358 * [17:17] DIMM ECC Enable
359 * 0 = Some DIMMs do not have ECC
360 * 1 = All DIMMs have ECC bits
361 * [18:18] UnBuffered DIMMs
363 * 1 = Unbuffered DIMMS
364 * [19:19] Enable 32-Byte Granularity
365 * 0 = Optimize for 64byte bursts
366 * 1 = Optimize for 32byte bursts
367 * [20:20] DIMM 0 is x4
368 * [21:21] DIMM 1 is x4
369 * [22:22] DIMM 2 is x4
370 * [23:23] DIMM 3 is x4
372 * 1 = x4 DIMM present
373 * [24:24] Disable DRAM Receivers
374 * 0 = Receivers enabled
375 * 1 = Receivers disabled
377 * 000 = Arbiter's choice is always respected
378 * 001 = Oldest entry in DCQ can be bypassed 1 time
379 * 010 = Oldest entry in DCQ can be bypassed 2 times
380 * 011 = Oldest entry in DCQ can be bypassed 3 times
381 * 100 = Oldest entry in DCQ can be bypassed 4 times
382 * 101 = Oldest entry in DCQ can be bypassed 5 times
383 * 110 = Oldest entry in DCQ can be bypassed 6 times
384 * 111 = Oldest entry in DCQ can be bypassed 7 times
387 PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
389 (0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
390 (1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
391 (2 << 14)|(0 << 13)|(0 << 12)|
392 (0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
393 (0 << 3) |(0 << 1) |(0 << 0),
394 /* DRAM Config High Register
396 * [ 3: 0] Maximum Asynchronous Latency
401 * [11: 8] Read Preamble
419 * [18:16] Idle Cycle Limit
428 * [19:19] Dynamic Idle Cycle Center Enable
429 * 0 = Use Idle Cycle Limit
430 * 1 = Generate a dynamic Idle cycle limit
431 * [22:20] DRAM MEMCLK Frequency
441 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
442 * 0 = Disable MemClks
444 * [26:26] Memory Clock 0 Enable
447 * [27:27] Memory Clock 1 Enable
450 * [28:28] Memory Clock 2 Enable
453 * [29:29] Memory Clock 3 Enable
458 PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
459 (0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
460 (0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
461 /* DRAM Delay Line Register
463 * Adjust the skew of the input DQS strobe relative to DATA
465 * [23:16] Delay Line Adjust
466 * Adjusts the DLL derived PDL delay by one or more delay stages
467 * in either the faster or slower direction.
468 * [24:24] Adjust Slower
470 * 1 = Adj is used to increase the PDL delay
471 * [25:25] Adjust Faster
473 * 1 = Adj is used to decrease the PDL delay
476 PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
477 /* DRAM Scrub Control Register
479 * [ 4: 0] DRAM Scrub Rate
481 * [12: 8] L2 Scrub Rate
483 * [20:16] Dcache Scrub Rate
486 * 00000 = Do not scrub
508 * All Others = Reserved
510 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
511 /* DRAM Scrub Address Low Register
513 * [ 0: 0] DRAM Scrubber Redirect Enable
515 * 1 = Scrubber Corrects errors found in normal operation
517 * [31: 6] DRAM Scrub Address 31-6
519 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
520 /* DRAM Scrub Address High Register
522 * [ 7: 0] DRAM Scrub Address 39-32
525 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
531 if (!controller_present(ctrl)) {
532 // print_debug("No memory controller present\r\n");
536 print_spew("setting up CPU");
537 print_spew_hex8(ctrl->node_id);
538 print_spew(" northbridge registers\r\n");
539 max = sizeof(register_values)/sizeof(register_values[0]);
540 for(i = 0; i < max; i += 3) {
546 print_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
548 print_spew_hex32(register_values[i]);
550 print_spew_hex32(register_values[i+2]);
554 dev = (register_values[i] & ~0xff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
555 where = register_values[i] & 0xff;
556 reg = pci_read_config32(dev, where);
557 reg &= register_values[i+1];
558 reg |= register_values[i+2];
559 pci_write_config32(dev, where, reg);
562 reg = pci_read_config32(register_values[i]);
563 reg &= register_values[i+1];
564 reg |= register_values[i+2];
565 pci_write_config32(register_values[i], reg);
568 print_spew("done.\r\n");
572 static void hw_enable_ecc(const struct mem_controller *ctrl)
575 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
576 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
577 dcl &= ~DCL_DimmEccEn;
578 if (nbcap & NBCAP_ECC) {
579 dcl |= DCL_DimmEccEn;
581 if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
582 dcl &= ~DCL_DimmEccEn;
584 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
588 static int is_dual_channel(const struct mem_controller *ctrl)
591 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
592 return dcl & DCL_128BitEn;
595 static int is_opteron(const struct mem_controller *ctrl)
597 /* Test to see if I am an Opteron.
598 * FIXME Testing dual channel capability is correct for now
599 * but a better test is probably required.
601 #warning "FIXME implement a better test for opterons"
603 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
604 return !!(nbcap & NBCAP_128Bit);
607 static int is_registered(const struct mem_controller *ctrl)
609 /* Test to see if we are dealing with registered SDRAM.
610 * If we are not registered we are unbuffered.
611 * This function must be called after spd_handle_unbuffered_dimms.
614 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
615 return !(dcl & DCL_UnBufDimm);
623 #if K8_4RANK_DIMM_SUPPORT == 1
628 static struct dimm_size spd_get_dimm_size(unsigned device)
630 /* Calculate the log base 2 size of a DIMM in bits */
637 #if K8_4RANK_DIMM_SUPPORT == 1
641 /* Note it might be easier to use byte 31 here, it has the DIMM size as
642 * a multiple of 4MB. The way we do it now we can size both
643 * sides of an asymmetric dimm.
645 value = spd_read_byte(device, 3); /* rows */
646 if (value < 0) goto hw_err;
647 if ((value & 0xf) == 0) goto val_err;
648 sz.side1 += value & 0xf;
649 sz.rows = value & 0xf;
651 value = spd_read_byte(device, 4); /* columns */
652 if (value < 0) goto hw_err;
653 if ((value & 0xf) == 0) goto val_err;
654 sz.side1 += value & 0xf;
655 sz.col = value & 0xf;
657 value = spd_read_byte(device, 17); /* banks */
658 if (value < 0) goto hw_err;
659 if ((value & 0xff) == 0) goto val_err;
660 sz.side1 += log2(value & 0xff);
662 /* Get the module data width and convert it to a power of two */
663 value = spd_read_byte(device, 7); /* (high byte) */
664 if (value < 0) goto hw_err;
668 low = spd_read_byte(device, 6); /* (low byte) */
669 if (low < 0) goto hw_err;
670 value = value | (low & 0xff);
671 if ((value != 72) && (value != 64)) goto val_err;
672 sz.side1 += log2(value);
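	/* sz.side1 is now the log2 of the side 1 capacity in bits. Example:
	 * 13 rows + 10 columns + 2 (log2 of 4 banks) + 6 (log2 of 64-bit
	 * data width) gives sz.side1 = 31, i.e. 2^31 bits = 256MB.
	 */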
675 value = spd_read_byte(device, 5); /* number of physical banks */
676 if (value < 0) goto hw_err;
677 if (value == 1) goto out;
678 if ((value != 2) && (value != 4 )) {
681 #if K8_4RANK_DIMM_SUPPORT == 1
685 /* Start with the symmetrical case */
688 value = spd_read_byte(device, 3); /* rows */
689 if (value < 0) goto hw_err;
690 if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
691 sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
692 sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */
694 value = spd_read_byte(device, 4); /* columns */
695 if (value < 0) goto hw_err;
696 if ((value & 0xff) == 0) goto val_err;
697 sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
698 sz.side2 += ((value >> 4) & 0x0f); /* Add in columns on side 2 */
703 die("Bad SPD value\r\n");
704 /* If a hw error occurs, report that I have no memory */
710 #if K8_4RANK_DIMM_SUPPORT == 1
718 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
720 uint32_t base0, base1;
723 if (sz.side1 != sz.side2) {
727 /* For each base register.
728 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
729 * The initial dimm size is in bits.
730 * Set the base enable bit0.
735 /* Make certain side1 of the dimm is at least 32MB */
736 if (sz.side1 >= (25 +3)) {
737 base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
740 /* Make certain side2 of the dimm is at least 32MB */
741 if (sz.side2 >= (25 + 3)) {
742 base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
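	/* Example: a log2 size of 29 (2^29 bits = 64MB) gives
	 * (1 << ((29 - 28) + 21)) | 1 = 0x00400001: two 32MB blocks in
	 * bits 31-21 plus the chip-select enable in bit 0.
	 */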
745 /* Double the size if we are using dual channel memory */
746 if (is_dual_channel(ctrl)) {
747 base0 = (base0 << 1) | (base0 & 1);
748 base1 = (base1 << 1) | (base1 & 1);
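	/* The shift doubles the size field and (baseN & 1) re-ORs the enable
	 * bit; the stray bit this leaves in bit 1 is cleared with the
	 * reserved bits below. E.g. 0x00400001 (64MB) ends up as
	 * 0x00800001 (128MB).
	 */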
751 /* Clear the reserved bits */
752 base0 &= ~0x001ffffe;
753 base1 &= ~0x001ffffe;
755 /* Set the appropriate DIMM base address register */
756 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
757 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
758 #if K8_4RANK_DIMM_SUPPORT == 1
760 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
761 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
765 /* Enable the memory clocks for this DIMM */
767 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
768 dch |= DCH_MEMCLK_EN0 << index;
769 #if K8_4RANK_DIMM_SUPPORT == 1
771 dch |= DCH_MEMCLK_EN0 << (index + 2);
774 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
778 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
780 static const unsigned cs_map_aa[] = {
781 /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
790 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
791 map &= ~(0xf << (index * 4));
792 #if K8_4RANK_DIMM_SUPPORT == 1
794 map &= ~(0xf << ( (index + 2) * 4));
799 /* Make certain side1 of the dimm is at least 32MB */
800 if (sz.side1 >= (25 +3)) {
801 if(is_cpu_pre_d0()) {
802 map |= (sz.side1 - (25 + 3)) << (index *4);
803 #if K8_4RANK_DIMM_SUPPORT == 1
805 map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
810 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
811 #if K8_4RANK_DIMM_SUPPORT == 1
813 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
819 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
823 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
827 for(i = 0; i < DIMM_SOCKETS; i++) {
829 if (!(dimm_mask & (1 << i))) {
832 sz = spd_get_dimm_size(ctrl->channel0[i]);
834 return -1; /* Report SPD error */
836 set_dimm_size(ctrl, sz, i);
837 set_dimm_map (ctrl, sz, i);
842 static void route_dram_accesses(const struct mem_controller *ctrl,
843 unsigned long base_k, unsigned long limit_k)
845 /* Route the addresses to the controller node */
850 unsigned limit_reg, base_reg;
853 node_id = ctrl->node_id;
854 index = (node_id << 3);
855 limit = (limit_k << 2);
858 limit |= ( 0 << 8) | (node_id << 0);
859 base = (base_k << 2);
861 base |= (0 << 8) | (1<<1) | (1<<0);
863 limit_reg = 0x44 + index;
864 base_reg = 0x40 + index;
865 for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
866 pci_write_config32(device, limit_reg, limit);
867 pci_write_config32(device, base_reg, base);
871 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
873 /* Error if I don't have memory */
878 /* Report the amount of memory. */
879 print_spew("RAM: 0x");
880 print_spew_hex32(tom_k);
881 print_spew(" KB\r\n");
883 /* Now set top of memory */
885 if(tom_k>(4*1024*1024)) {
886 msr.lo = (tom_k & 0x003fffff) << 10;
887 msr.hi = (tom_k & 0xffc00000) >> 22;
888 wrmsr(TOP_MEM2, msr);
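	/* tom_k is in KB, so the byte address is tom_k << 10, split across
	 * msr.lo and msr.hi. Example: tom_k = 0x500000 (5GB) gives
	 * msr.hi = 1, msr.lo = 0x40000000, i.e. TOP_MEM2 = 0x140000000 = 5GB.
	 */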
891 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
892 * so I can see my rom chip and other I/O devices.
894 if (tom_k >= 0x003f0000) {
895 #if K8_HW_MEM_HOLE_SIZEK != 0
896 if(hole_startk != 0) {
902 msr.lo = (tom_k & 0x003fffff) << 10;
903 msr.hi = (tom_k & 0xffc00000) >> 22;
907 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
910 static const uint8_t csbase_low_shift[] = {
913 /* 128MB */ (14 - 4),
914 /* 256MB */ (15 - 4),
915 /* 512MB */ (15 - 4),
920 static const uint8_t csbase_low_d0_shift[] = {
923 /* 128MB */ (14 - 4),
924 /* 128MB */ (15 - 4),
925 /* 256MB */ (15 - 4),
926 /* 512MB */ (15 - 4),
927 /* 256MB */ (16 - 4),
928 /* 512MB */ (16 - 4),
934 /* cs_base_high is not changed */
937 int chip_selects, index;
939 unsigned common_size;
940 unsigned common_cs_mode;
941 uint32_t csbase, csmask;
943 /* See if all of the memory chip selects are the same size
944 * and if so count them.
949 for(index = 0; index < 8; index++) {
954 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
962 if (common_size == 0) {
965 /* The sizes differ, fail */
966 if (common_size != size) {
970 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
971 cs_mode =( value >> ((index>>1)*4)) & 0xf;
972 if(cs_mode == 0 ) continue;
973 if(common_cs_mode == 0) {
974 common_cs_mode = cs_mode;
976 /* The cs_mode differs, fail */
977 if(common_cs_mode != cs_mode) {
982 /* Chip selects can only be interleaved when there is
983 * more than one and there is a power of two of them.
985 bits = log2(chip_selects);
986 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
990 /* Find the bits of csbase that we need to interleave on */
992 csbase_inc = 1 << csbase_low_shift[common_cs_mode];
993 if(is_dual_channel(ctrl)) {
994 /* Also we run out of address mask bits if we try to interleave 8 4GB dimms */
995 if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
996 // print_debug("8 4GB chip selects cannot be interleaved\r\n");
1003 csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
1004 if(is_dual_channel(ctrl)) {
1005 if( (bits==3) && (common_cs_mode > 8)) {
1006 // print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
1013 /* Compute the initial values for csbase and csmask.
1014 * In csbase just set the enable bit and the base to zero.
1015 * In csmask set the mask bits for the size and page level interleave.
1018 csmask = (((common_size << bits) - 1) << 21);
1019 csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
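	/* Example, assuming common_size is in the 32MB blocks stored in the
	 * cs base registers: four 128MB chip selects (common_size = 4,
	 * bits = 2) give a size field of ((4 << 2) - 1) << 21 = 0x01e00000;
	 * the 0xfe00 term then sets the page-level mask bits except the
	 * ones reserved for the interleave itself.
	 */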
1020 for(index = 0; index < 8; index++) {
1023 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1024 /* Is it enabled? */
1028 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1029 pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
1030 csbase += csbase_inc;
1033 print_spew("Interleaved\r\n");
1035 /* Return the memory size in K */
1036 return common_size << (15 + bits);
1039 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1043 /* Remember which registers we have used in the high 8 bits of tom */
1046 /* Find the largest remaining candidate */
1047 unsigned index, canidate;
1048 uint32_t csbase, csmask;
1052 for(index = 0; index < 8; index++) {
1054 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1056 /* Is it enabled? */
1061 /* Is it greater? */
1062 if (value <= csbase) {
1066 /* Has it already been selected */
1067 if (tom & (1 << (index + 24))) {
1070 /* I have a new candidate */
1074 /* See if I have found a new candidate */
1079 /* Remember the dimm size */
1080 size = csbase >> 21;
1082 /* Remember I have used this register */
1083 tom |= (1 << (canidate + 24));
1085 /* Recompute the cs base register value */
1086 csbase = (tom << 21) | 1;
1088 /* Increment the top of memory */
1091 /* Compute the memory mask */
1092 csmask = ((size -1) << 21);
1093 csmask |= 0xfe00; /* For now don't optimize */
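	/* Example: a 256MB chip select (size = 8 blocks of 32MB) placed at
	 * tom = 0 gets csbase = 0x00000001 and
	 * csmask = ((8 - 1) << 21) | 0xfe00 = 0x00e0fe00.
	 */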
1095 /* Write the new base register */
1096 pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1097 /* Write the new mask register */
1098 pci_write_config32(ctrl->f2, DRAM_CSMASK + (canidate << 2), csmask);
1101 /* Return the memory size in K */
1102 return (tom & ~0xff000000) << 15;
1105 unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1109 /* Find the last memory address used */
1111 for(node_id = 0; node_id < max_node_id; node_id++) {
1112 uint32_t limit, base;
1114 index = node_id << 3;
1115 base = pci_read_config32(ctrl->f1, 0x40 + index);
1116 /* Only look at the limit if the base is enabled */
1117 if ((base & 3) == 3) {
1118 limit = pci_read_config32(ctrl->f1, 0x44 + index);
1119 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
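			/* The limit register keeps address bits 39-24 in [31:16],
			 * i.e. the byte address >> 8; adding 0x00010000 makes the
			 * limit exclusive and the further >> 2 yields KB. Example:
			 * limit[31:16] = 0x003f gives
			 * end_k = 0x00400000 >> 2 = 0x100000 KB = 1GB.
			 */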
1125 static void order_dimms(const struct mem_controller *ctrl)
1127 unsigned long tom_k, base_k;
1129 if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1130 tom_k = interleave_chip_selects(ctrl);
1132 print_debug("Interleaving disabled\r\n");
1136 tom_k = order_chip_selects(ctrl);
1138 /* Compute the memory base address */
1139 base_k = memory_end_k(ctrl, ctrl->node_id);
1141 route_dram_accesses(ctrl, base_k, tom_k);
1142 set_top_mem(tom_k, 0);
1145 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1147 print_debug("disabling dimm");
1148 print_debug_hex8(index);
1149 print_debug("\r\n");
1150 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1151 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1152 dimm_mask &= ~(1 << index);
1156 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, long dimm_mask)
1164 for(i = 0; (i < DIMM_SOCKETS); i++) {
1166 if (!(dimm_mask & (1 << i))) {
1169 value = spd_read_byte(ctrl->channel0[i], 21);
1173 /* Registered dimm ? */
1174 if (value & (1 << 1)) {
1177 /* Otherwise it must be an unbuffered dimm */
1182 if (unbuffered && registered) {
1183 die("Mixed buffered and registered dimms not supported");
1186 //By yhlu: for debugging. Athlon64 socket 939 can do dual channel, but it uses unbuffered DIMMs
1187 if (unbuffered && is_opteron(ctrl)) {
1188 die("Unbuffered Dimms not supported on Opteron");
1192 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1193 dcl &= ~DCL_UnBufDimm;
1195 dcl |= DCL_UnBufDimm;
1197 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1199 if (is_registered(ctrl)) {
1200 print_debug("Registered\r\n");
1202 print_debug("Unbuffered\r\n");
1208 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1213 for(i = 0; i < DIMM_SOCKETS; i++) {
1216 device = ctrl->channel0[i];
1218 byte = spd_read_byte(ctrl->channel0[i], 2); /* Type */
1220 dimm_mask |= (1 << i);
1223 device = ctrl->channel1[i];
1225 byte = spd_read_byte(ctrl->channel1[i], 2);
1227 dimm_mask |= (1 << (i + DIMM_SOCKETS));
1234 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
1238 /* SPD addresses whose values must match on both channels */
1239 static const uint8_t addresses[] = {
1240 2, /* Type should be DDR SDRAM */
1241 3, /* *Row addresses */
1242 4, /* *Column addresses */
1243 5, /* *Physical Banks */
1244 6, /* *Module Data Width low */
1245 7, /* *Module Data Width high */
1246 9, /* *Cycle time at highest CAS Latency CL=X */
1247 11, /* *SDRAM Type */
1248 13, /* *SDRAM Width */
1249 17, /* *Logical Banks */
1250 18, /* *Supported CAS Latencies */
1251 21, /* *SDRAM Module Attributes */
1252 23, /* *Cycle time at CAS Latency (CLX - 0.5) */
1253 26, /* *Cycle time at CAS Latency (CLX - 1.0) */
1254 27, /* *tRP Row precharge time */
1255 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1256 29, /* *tRCD RAS to CAS */
1257 30, /* *tRAS Activate to Precharge */
1258 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1259 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1261 /* If the dimms are not in pairs do not do dual channels */
1262 if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1263 ((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1264 goto single_channel;
1266 /* If the cpu is not capable of doing dual channels don't do dual channels */
1267 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1268 if (!(nbcap & NBCAP_128Bit)) {
1269 goto single_channel;
1271 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1272 unsigned device0, device1;
1275 /* If I don't have a dimm skip this one */
1276 if (!(dimm_mask & (1 << i))) {
1279 device0 = ctrl->channel0[i];
1280 device1 = ctrl->channel1[i];
1281 for(j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
1283 addr = addresses[j];
1284 value0 = spd_read_byte(device0, addr);
1288 value1 = spd_read_byte(device1, addr);
1292 if (value0 != value1) {
1293 goto single_channel;
1297 print_spew("Enabling dual channel memory\r\n");
1299 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1300 dcl &= ~DCL_32ByteEn;
1301 dcl |= DCL_128BitEn;
1302 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1305 dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
1311 uint8_t divisor; /* In 1/2 ns increments */
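	/* e.g. 10ns (100MHz) is stored as (10 << 1) = 20, and 7.5ns (133MHz)
	 * as (7 << 1) + 1 = 15 */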
1314 uint32_t dch_memclk;
1315 uint16_t dch_tref4k, dch_tref8k;
1320 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1322 static const struct mem_param speed[] = {
1324 .name = "100Mhz\r\n",
1326 .divisor = (10 <<1),
1329 .dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1330 .dch_tref4k = DTH_TREF_100MHZ_4K,
1331 .dch_tref8k = DTH_TREF_100MHZ_8K,
1335 .name = "133Mhz\r\n",
1337 .divisor = (7<<1)+1,
1340 .dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1341 .dch_tref4k = DTH_TREF_133MHZ_4K,
1342 .dch_tref8k = DTH_TREF_133MHZ_8K,
1346 .name = "166Mhz\r\n",
1351 .dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1352 .dch_tref4k = DTH_TREF_166MHZ_4K,
1353 .dch_tref8k = DTH_TREF_166MHZ_8K,
1357 .name = "200Mhz\r\n",
1362 .dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1363 .dch_tref4k = DTH_TREF_200MHZ_4K,
1364 .dch_tref8k = DTH_TREF_200MHZ_8K,
1371 const struct mem_param *param;
1372 for(param = &speed[0]; param->cycle_time ; param++) {
1373 if (min_cycle_time > (param+1)->cycle_time) {
1377 if (!param->cycle_time) {
1378 die("min_cycle_time too low");
1380 print_spew(param->name);
1381 #ifdef DRAM_MIN_CYCLE_TIME
1382 print_debug(param->name);
1387 struct spd_set_memclk_result {
1388 const struct mem_param *param;
1391 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1393 /* Compute the minimum cycle time for these dimms */
1394 struct spd_set_memclk_result result;
1395 unsigned min_cycle_time, min_latency, bios_cycle_time;
1399 static const uint8_t latency_indicies[] = { 26, 23, 9 };
1400 static const unsigned char min_cycle_times[] = {
1401 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1402 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1403 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1404 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
1408 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1409 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1410 bios_cycle_time = min_cycle_times[
1411 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1412 if (bios_cycle_time > min_cycle_time) {
1413 min_cycle_time = bios_cycle_time;
1417 /* Compute the least latency with the fastest clock supported
1418 * by both the memory controller and the dimms.
1420 for(i = 0; i < DIMM_SOCKETS; i++) {
1421 int new_cycle_time, new_latency;
1426 if (!(dimm_mask & (1 << i))) {
1430 /* First find the supported CAS latencies
1431 * Byte 18 for DDR SDRAM is interpreted:
1432 * bit 0 == CAS Latency = 1.0
1433 * bit 1 == CAS Latency = 1.5
1434 * bit 2 == CAS Latency = 2.0
1435 * bit 3 == CAS Latency = 2.5
1436 * bit 4 == CAS Latency = 3.0
1437 * bit 5 == CAS Latency = 3.5
1441 new_cycle_time = 0xa0;
1444 latencies = spd_read_byte(ctrl->channel0[i], 18);
1445 if (latencies <= 0) continue;
1447 /* Compute the lowest cas latency supported */
1448 latency = log2(latencies) -2;
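	/* latency is a byte-18 bit index (bit 2 = CL2.0, bit 3 = CL2.5,
	 * bit 4 = CL3.0). Starting two bits below the highest supported CL
	 * pairs each candidate latency with its SPD cycle-time byte:
	 * byte 26 (CL - 1.0), byte 23 (CL - 0.5), byte 9 (highest CL).
	 */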
1450 /* Loop through and find a fast clock with a low latency */
1451 for(index = 0; index < 3; index++, latency++) {
1453 if ((latency < 2) || (latency > 4) ||
1454 (!(latencies & (1 << latency)))) {
1457 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1462 /* Only increase the latency if we decrease the clock */
1463 if ((value >= min_cycle_time) && (value < new_cycle_time)) {
1464 new_cycle_time = value;
1465 new_latency = latency;
1468 if (new_latency > 4){
1471 /* Does min_cycle_time need to be increased? */
1472 if (new_cycle_time > min_cycle_time) {
1473 min_cycle_time = new_cycle_time;
1475 /* Does min_latency need to be increased? */
1476 if (new_latency > min_latency) {
1477 min_latency = new_latency;
1480 /* Make a second pass through the dimms and disable
1481 * any that cannot support the selected memclk and cas latency.
1484 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1489 if (!(dimm_mask & (1 << i))) {
1492 latencies = spd_read_byte(ctrl->channel0[i], 18);
1493 if (latencies < 0) goto hw_error;
1494 if (latencies == 0) {
1498 /* Compute the lowest cas latency supported */
1499 latency = log2(latencies) -2;
1501 /* Walk through searching for the selected latency */
1502 for(index = 0; index < 3; index++, latency++) {
1503 if (!(latencies & (1 << latency))) {
1506 if (latency == min_latency)
1509 /* If I can't find the latency, or my index is bad, error out */
1510 if ((latency != min_latency) || (index >= 3)) {
1514 /* Read the min_cycle_time for this latency */
1515 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1516 if (value < 0) goto hw_error;
1518 /* All is good if the selected clock speed
1519 * is what I need or slower.
1521 if (value <= min_cycle_time) {
1524 /* Otherwise I have an error, disable the dimm */
1526 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1529 //Reduce the speed to support a full load of 4-rank DIMMs
1530 #if K8_4RANK_DIMM_SUPPORT
1531 if(dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1533 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1535 if (!(dimm_mask & (1 << i))) {
1538 val = spd_read_byte(ctrl->channel0[i], 5);
1545 if(min_cycle_time <= 0x50 ) {
1546 min_cycle_time = 0x60;
1553 /* Now that I know the minimum cycle time lookup the memory parameters */
1554 result.param = get_mem_param(min_cycle_time);
1556 /* Update DRAM Config High with our selected memory speed */
1557 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1558 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1560 /* Improve DQS centering by correcting for the case where the core speed multiplier and the MEMCLK speed result in an odd clock divisor, by selecting the next lowest memory speed. Required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating??? */
1561 if(!is_cpu_pre_e0()) {
1562 if(min_cycle_time==0x50) {
1568 value |= result.param->dch_memclk;
1569 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1571 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1572 /* Update DRAM Timing Low with our selected cas latency */
1573 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1574 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1575 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1576 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1578 result.dimm_mask = dimm_mask;
1581 result.param = (const struct mem_param *)0;
1582 result.dimm_mask = -1;
1587 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1589 unsigned clocks, old_clocks;
1592 value = spd_read_byte(ctrl->channel0[i], 41);
1593 if (value < 0) return -1;
1594 if ((value == 0) || (value == 0xff)) {
1597 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
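	/* value is tRC in ns and param->divisor is the cycle time in 1/2 ns,
	 * so this computes ceil(tRC / tCK). Example: tRC = 55ns at 200MHz
	 * (divisor = 10): clocks = (110 + 9) / 10 = 11.
	 */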
1598 if (clocks < DTL_TRC_MIN) {
1599 clocks = DTL_TRC_MIN;
1601 if (clocks > DTL_TRC_MAX) {
1605 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1606 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1607 if (old_clocks > clocks) {
1608 clocks = old_clocks;
1610 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1611 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1612 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1616 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1618 unsigned clocks, old_clocks;
1621 value = spd_read_byte(ctrl->channel0[i], 42);
1622 if (value < 0) return -1;
1623 if ((value == 0) || (value == 0xff)) {
1624 value = param->tRFC;
1626 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1627 if (clocks < DTL_TRFC_MIN) {
1628 clocks = DTL_TRFC_MIN;
1630 if (clocks > DTL_TRFC_MAX) {
1633 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1634 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
1635 if (old_clocks > clocks) {
1636 clocks = old_clocks;
1638 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1639 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1640 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1645 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1647 unsigned clocks, old_clocks;
1650 value = spd_read_byte(ctrl->channel0[i], 29);
1651 if (value < 0) return -1;
1652 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
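	/* SPD byte 29 is tRCD in 1/4 ns and (param->divisor << 1) is the
	 * cycle time in 1/4 ns, so this is ceil(tRCD / tCK). Example:
	 * tRCD = 20ns (value = 80) at 200MHz: clocks = (80 + 19) / 20 = 4.
	 */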
1653 if (clocks < DTL_TRCD_MIN) {
1654 clocks = DTL_TRCD_MIN;
1656 if (clocks > DTL_TRCD_MAX) {
1659 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1660 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
1661 if (old_clocks > clocks) {
1662 clocks = old_clocks;
1664 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1665 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1666 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1670 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1672 unsigned clocks, old_clocks;
1675 value = spd_read_byte(ctrl->channel0[i], 28);
1676 if (value < 0) return -1;
1677 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1678 if (clocks < DTL_TRRD_MIN) {
1679 clocks = DTL_TRRD_MIN;
1681 if (clocks > DTL_TRRD_MAX) {
1684 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1685 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
1686 if (old_clocks > clocks) {
1687 clocks = old_clocks;
1689 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1690 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1691 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1695 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1697 unsigned clocks, old_clocks;
1700 value = spd_read_byte(ctrl->channel0[i], 30);
1701 if (value < 0) return -1;
1702 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1703 if (clocks < DTL_TRAS_MIN) {
1704 clocks = DTL_TRAS_MIN;
1706 if (clocks > DTL_TRAS_MAX) {
1709 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1710 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1711 if (old_clocks > clocks) {
1712 clocks = old_clocks;
1714 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1715 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1716 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1720 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1722 unsigned clocks, old_clocks;
1725 value = spd_read_byte(ctrl->channel0[i], 27);
1726 if (value < 0) return -1;
1727 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1728 if (clocks < DTL_TRP_MIN) {
1729 clocks = DTL_TRP_MIN;
1731 if (clocks > DTL_TRP_MAX) {
1734 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1735 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
1736 if (old_clocks > clocks) {
1737 clocks = old_clocks;
1739 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1740 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1741 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1745 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1748 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1749 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1750 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1751 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1755 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1758 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1759 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1760 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1761 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1764 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1768 unsigned tref, old_tref;
1769 value = spd_read_byte(ctrl->channel0[i], 3);
1770 if (value < 0) return -1;
1773 tref = param->dch_tref8k;
1775 tref = param->dch_tref4k;
1778 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1779 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
1780 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1781 tref = param->dch_tref4k;
1783 tref = param->dch_tref8k;
1785 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1786 dth |= (tref << DTH_TREF_SHIFT);
1787 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1792 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1796 #if K8_4RANK_DIMM_SUPPORT == 1
1800 value = spd_read_byte(ctrl->channel0[i], 13);
1805 #if K8_4RANK_DIMM_SUPPORT == 1
1806 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
1812 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1813 #if K8_4RANK_DIMM_SUPPORT == 1
1815 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1818 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1823 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1827 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1831 value = spd_read_byte(ctrl->channel0[i], 11);
1836 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1837 dcl &= ~DCL_DimmEccEn;
1838 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1843 static int count_dimms(const struct mem_controller *ctrl)
1848 for(index = 0; index < 8; index += 2) {
1850 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
1858 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1862 clocks = 1; /* AMD says hard code this */
1863 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1864 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1865 dth |= ((clocks - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1866 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1869 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
1877 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1878 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1879 divisor = param->divisor;
1881 if (is_opteron(ctrl)) {
1882 if (latency == DTL_CL_2) {
1883 if (divisor == ((6 << 0) + 0)) {
1887 else if (divisor > ((6 << 0)+0)) {
1888 /* 100Mhz && 133Mhz */
1892 else if (latency == DTL_CL_2_5) {
1895 else if (latency == DTL_CL_3) {
1896 if (divisor == ((6 << 0)+0)) {
1900 else if (divisor > ((6 << 0)+0)) {
1901 /* 100Mhz && 133Mhz */
1906 else /* Athlon64 */ {
1907 if (is_registered(ctrl)) {
1908 if (latency == DTL_CL_2) {
1911 else if (latency == DTL_CL_2_5) {
1914 else if (latency == DTL_CL_3) {
1918 else /* Unbuffered */{
1919 if (latency == DTL_CL_2) {
1922 else if (latency == DTL_CL_2_5) {
1925 else if (latency == DTL_CL_3) {
1930 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1931 die("Unknown Trwt\r\n");
1934 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1935 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1936 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1937 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1941 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1943 /* Memory Clocks after CAS# */
1946 if (is_registered(ctrl)) {
1951 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1952 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1953 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1954 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1958 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1962 unsigned rdpreamble;
1963 divisor = param->divisor;
1964 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1965 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
1967 if (is_registered(ctrl)) {
1968 if (divisor == ((10 << 1)+0)) {
1970 rdpreamble = ((9 << 1)+ 0);
1972 else if (divisor == ((7 << 1)+1)) {
1974 rdpreamble = ((8 << 1)+0);
1976 else if (divisor == ((6 << 1)+0)) {
1978 rdpreamble = ((7 << 1)+1);
1980 else if (divisor == ((5 << 1)+0)) {
1982 rdpreamble = ((7 << 1)+0);
1989 for(i = 0; i < 4; i++) {
1990 if (ctrl->channel0[i]) {
1994 if (divisor == ((10 << 1)+0)) {
1998 rdpreamble = ((9 << 1)+0);
2001 rdpreamble = ((14 << 1)+0);
2004 else if (divisor == ((7 << 1)+1)) {
2008 rdpreamble = ((7 << 1)+0);
2011 rdpreamble = ((11 << 1)+0);
2014 else if (divisor == ((6 << 1)+0)) {
2018 rdpreamble = ((7 << 1)+0);
2021 rdpreamble = ((9 << 1)+0);
2024 else if (divisor == ((5 << 1)+0)) {
2028 rdpreamble = ((5 << 1)+0);
2031 rdpreamble = ((7 << 1)+0);
2035 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
2036 die("Unknown rdpreamble");
2038 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
2039 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2042 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2048 dimms = count_dimms(ctrl);
2050 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2051 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
2053 if (is_registered(ctrl)) {
2065 die("Too many unbuffered dimms");
2067 else if (dimms == 3) {
2076 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2077 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2080 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2083 /* AMD says to hardcode this */
2084 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2085 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2086 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2087 dch |= DCH_DYN_IDLE_CTR_EN;
2088 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2091 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2095 init_Tref(ctrl, param);
2096 for(i = 0; i < DIMM_SOCKETS; i++) {
2098 if (!(dimm_mask & (1 << i))) {
2101 /* DRAM Timing Low Register */
2102 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2103 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2104 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2105 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2106 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2107 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2109 /* DRAM Timing High Register */
2110 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2113 /* DRAM Config Low */
2114 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2115 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
2121 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2123 /* DRAM Timing Low Register */
2124 set_Twr(ctrl, param);
2126 /* DRAM Timing High Register */
2127 set_Twtr(ctrl, param);
2128 set_Trwt(ctrl, param);
2129 set_Twcl(ctrl, param);
2131 /* DRAM Config High */
2132 set_read_preamble(ctrl, param);
2133 set_max_async_latency(ctrl, param);
2134 set_idle_cycle_limit(ctrl, param);
2138 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2140 struct spd_set_memclk_result result;
2141 const struct mem_param *param;
2144 if (!controller_present(ctrl)) {
2145 // print_debug("No memory controller present\r\n");
2149 hw_enable_ecc(ctrl);
2150 activate_spd_rom(ctrl);
2151 dimm_mask = spd_detect_dimms(ctrl);
2152 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2153 print_debug("No memory for this cpu\r\n");
2156 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2159 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2162 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
2165 result = spd_set_memclk(ctrl, dimm_mask);
2166 param = result.param;
2167 dimm_mask = result.dimm_mask;
2170 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2176 /* Unrecoverable error reading SPD data */
2177 print_err("SPD error - reset\r\n");
2182 #if K8_HW_MEM_HOLE_SIZEK != 0
2183 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2186 uint32_t carry_over;
2188 uint32_t base, limit;
2193 carry_over = (4*1024*1024) - hole_startk;
2195 for(ii=controllers - 1;ii>i;ii--) {
2196 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2197 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2200 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
2201 for(j = 0; j < controllers; j++) {
2202 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2203 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
2206 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2207 for(j = 0; j < controllers; j++) {
2208 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2211 base = pci_read_config32(dev, 0x40 + (i << 3));
2212 basek = (base & 0xffff0000) >> 2;
2213 if(basek == hole_startk) {
2214 //No need to set the memhole here, because the hole offset would be 0 (overflow),
2215 //so change the base reg instead; the new basek will be 4*1024*1024
2217 base |= (4*1024*1024)<<2;
2218 for(j = 0; j < controllers; j++) {
2219 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
2223 hoist = /* hole start address */
2224 ((hole_startk << 10) & 0xff000000) +
2225 /* hole address to memory controller address */
2226 (((basek + carry_over) >> 6) & 0x0000ff00) +
2229 pci_write_config32(dev, 0xf0, hoist);
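	/* Example: a hole at 3GB (hole_startk = 0x300000) puts 0xc0 in bits
	 * 31-24 (hole base 0xc0000000); bits 15-8 hold bits 31-24 of the
	 * memory controller address the hole region is hoisted to, and
	 * bit 0 (the trailing + 1) is the hole-valid enable.
	 */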
2235 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2238 uint32_t hole_startk;
2241 hole_startk = 4*1024*1024 - K8_HW_MEM_HOLE_SIZEK;
2243 #if K8_HW_MEM_HOLE_SIZE_AUTO_INC == 1
2244 //Double check that hole_startk is valid: if it is equal to a node's basek, decrease it a little
2246 for(i=0; i<controllers; i++) {
2249 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2250 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2253 base_k = (base & 0xffff0000) >> 2;
2254 if(base_k == hole_startk) {
2255 hole_startk -= (base_k - basek_pri)>>1; // decrease hole_startk so it lands in the middle of the previous node
2256 break; //only one hole
2263 //Find the node index that needs the hole set
2263 for(i=0; i<controllers; i++) {
2264 uint32_t base, limit;
2265 unsigned base_k, limit_k;
2266 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2267 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2270 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2271 base_k = (base & 0xffff0000) >> 2;
2272 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2273 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2275 hoist_memory(controllers, ctrl, hole_startk, i);
2276 end_k = memory_end_k(ctrl, controllers);
2277 set_top_mem(end_k, hole_startk);
2278 break; //only one hole
2286 #define TIMEOUT_LOOPS 300000
2287 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2291 /* Error if I don't have memory */
2292 if (memory_end_k(ctrl, controllers) == 0) {
2293 die("No memory\r\n");
2296 /* Before enabling memory start the memory clocks */
2297 for(i = 0; i < controllers; i++) {
2299 if (!controller_present(ctrl + i))
2301 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2302 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2303 dch |= DCH_MEMCLK_VALID;
2304 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2307 /* Disable dram receivers */
2309 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2310 dcl |= DCL_DisInRcvrs;
2311 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2315 /* And if necessary toggle the reset on the dimms by hand */
2316 memreset(controllers, ctrl);
2318 for(i = 0; i < controllers; i++) {
2320 if (!controller_present(ctrl + i))
2322 /* Skip everything if I don't have any memory on this controller */
2323 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2324 if (!(dch & DCH_MEMCLK_VALID)) {
2328 /* Toggle DisDqsHys to get it working */
2329 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2330 if (dcl & DCL_DimmEccEn) {
2332 print_spew("ECC enabled\r\n");
2333 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
2335 if (dcl & DCL_128BitEn) {
2336 mnc |= MNC_CHIPKILL_EN;
2338 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2340 dcl |= DCL_DisDqsHys;
2341 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2342 dcl &= ~DCL_DisDqsHys;
2343 dcl &= ~DCL_DLL_Disable;
2346 dcl |= DCL_DramInit;
2347 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2350 for(i = 0; i < controllers; i++) {
2352 if (!controller_present(ctrl + i))
2354 /* Skip everything if I don't have any memory on this controller */
2355 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2356 if (!(dch & DCH_MEMCLK_VALID)) {
2360 print_debug("Initializing memory: ");
2364 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2366 if ((loops & 1023) == 0) {
2369 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2370 if (loops >= TIMEOUT_LOOPS) {
2371 print_debug(" failed\r\n");
2375 if (!is_cpu_pre_c0()) {
2376 /* Wait until it is safe to touch memory */
2377 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2378 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2380 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2381 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2384 print_debug(" done\r\n");
2387 #if K8_HW_MEM_HOLE_SIZEK != 0
2388 // init hw mem hole here
2389 /* The DramHoleValid bit can only be set after MemClrStatus is set by hardware */
2390 if(!is_cpu_pre_e0())
2391 set_hw_mem_hole(controllers, ctrl);
2394 //FIXME add enable node interleaving here -- yhlu
2396 1. check how many nodes we have; if not all have ram installed, get out
2397 2. check that cs_base lo is 0 on node 0 f2 0x40,,,,,; if any one is not using lo as CS_BASE, get out
2398 3. check that the other nodes match node 0 for f2 0x40,,,,,; otherwise get out
2399 4. if all of the above hold, enable node_interleaving in f1 0x40..... of every node
2400 5. for node interleaving we need to set the mem hole on every node (need to recalculate the hole offset in f0 for every node)
2403 #if USE_DCACHE_RAM == 0
2404 /* Make certain the first 1M of memory is initialized */
2405 print_debug("Clearing initial memory region: ");
2407 /* Use write combine caching while we setup the first 1M */
2408 cache_lbmem(MTRR_TYPE_WRCOMB);
2410 /* Clear the first CONFIG_LB_MEM_TOPK KB of memory */
2411 clear_memory((void *)0, CONFIG_LB_MEM_TOPK << 10);
2413 /* The first 1M is now setup, use it */
2414 cache_lbmem(MTRR_TYPE_WRBACK);
2416 print_debug(" done\r\n");
2420 static int mem_inited(int controllers, const struct mem_controller *ctrl)
2425 unsigned mask_inited = 0;
2427 for(i = 0; i < controllers; i++) {
2429 if (!controller_present(ctrl + i))
2433 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2435 if (!is_cpu_pre_c0()) { // B3
2437 if( (dcl & DCL_MemClrStatus) && (dcl & DCL_DramEnable) ) {
2438 mask_inited |= (1<<i);
2443 if(mask == mask_inited) return 1;
2448 #if USE_DCACHE_RAM == 1
2449 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
2453 struct mem_controller *ctrl;
2454 for(i=0;i<controllers; i++) {
2457 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2458 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2459 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2460 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2462 if(spd_addr == (void *)0) continue;
2464 for(j=0;j<DIMM_SOCKETS;j++) {
2465 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2466 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];