1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
7 #include <cpu/x86/mem.h>
8 #include <cpu/x86/cache.h>
9 #include <cpu/x86/mtrr.h>
13 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
14 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
17 #ifndef K8_4RANK_DIMM_SUPPORT
18 #define K8_4RANK_DIMM_SUPPORT 0
22 static void setup_resource_map(const unsigned int *register_values, int max)
25 // print_debug("setting up resource map....");
29 for(i = 0; i < max; i += 3) {
printk_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
37 print_debug_hex32(register_values[i]);
39 print_debug_hex32(register_values[i+2]);
43 dev = register_values[i] & ~0xff;
44 where = register_values[i] & 0xff;
45 reg = pci_read_config32(dev, where);
46 reg &= register_values[i+1];
47 reg |= register_values[i+2];
48 pci_write_config32(dev, where, reg);
50 reg = pci_read_config32(register_values[i]);
51 reg &= register_values[i+1];
52 reg |= register_values[i+2] & ~register_values[i+1];
53 pci_write_config32(register_values[i], reg);
56 // print_debug("done.\r\n");
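/* The AMD K8 on-chip northbridge identifies itself at function 0 with
 * device ID 0x1100 and vendor ID 0x1022 (AMD).
 */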
60 static int controller_present(const struct mem_controller *ctrl)
62 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
65 static void sdram_set_registers(const struct mem_controller *ctrl)
67 static const unsigned int register_values[] = {
/* Careful: set the limit registers before the base registers, which contain the enables */
70 /* DRAM Limit i Registers
79 * [ 2: 0] Destination Node ID
89 * [10: 8] Interleave select
90 * specifies the values of A[14:12] to use with interleave enable.
92 * [31:16] DRAM Limit Address i Bits 39-24
93 * This field defines the upper address bits of a 40 bit address
94 * that define the end of the DRAM region.
96 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
97 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
98 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
99 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
100 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
101 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
102 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
103 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
104 /* DRAM Base i Registers
113 * [ 0: 0] Read Enable
116 * [ 1: 1] Write Enable
117 * 0 = Writes Disabled
120 * [10: 8] Interleave Enable
121 * 000 = No interleave
122 * 001 = Interleave on A[12] (2 nodes)
124 * 011 = Interleave on A[12] and A[14] (4 nodes)
* 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
* [31:16] DRAM Base Address i Bits 39-24
131 * This field defines the upper address bits of a 40-bit address
132 * that define the start of the DRAM region.
134 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
135 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
136 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
137 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
138 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
139 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
140 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
141 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
143 /* DRAM CS Base Address i Registers
152 * [ 0: 0] Chip-Select Bank Enable
156 * [15: 9] Base Address (19-13)
* An optimization used when all DIMMs are the same size...
159 * [31:21] Base Address (35-25)
* This field defines the top 11 address bits of a 40-bit
161 * address that define the memory address space. These
162 * bits decode 32-MByte blocks of memory.
164 PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
165 PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
166 PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
167 PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
168 PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
169 PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
170 PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
171 PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
172 /* DRAM CS Mask Address i Registers
181 * Select bits to exclude from comparison with the DRAM Base address register.
183 * [15: 9] Address Mask (19-13)
184 * Address to be excluded from the optimized case
186 * [29:21] Address Mask (33-25)
187 * The bits with an address mask of 1 are excluded from address comparison
191 PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
192 PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
193 PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
194 PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
195 PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
196 PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
197 PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
198 PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
199 /* DRAM Bank Address Mapping Register
201 * Specify the memory module size
206 * 000 = 32Mbyte (Rows = 12 & Col = 8)
207 * 001 = 64Mbyte (Rows = 12 & Col = 9)
208 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
209 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
210 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
211 * 101 = 1Gbyte (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
212 * 110 = 2Gbyte (Rows = 14 & Col = 12)
219 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
220 /* DRAM Timing Low Register
222 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
232 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
233 * 0000 = 7 bus clocks
234 * 0001 = 8 bus clocks
236 * 1110 = 21 bus clocks
237 * 1111 = 22 bus clocks
238 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
239 * 0000 = 9 bus clocks
* 0001 = 10 bus clocks
242 * 1110 = 23 bus clocks
243 * 1111 = 24 bus clocks
* [14:12] Trcd (Ras#-active to Cas#-read/write Delay)
254 * [18:16] Trrd (Ras# to Ras# Delay)
* [23:20] Tras (Minimum Ras# Active Time)
265 * 0000 to 0100 = reserved
266 * 0101 = 5 bus clocks
268 * 1111 = 15 bus clocks
269 * [26:24] Trp (Row Precharge Time)
279 * [28:28] Twr (Write Recovery Time)
284 PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
285 /* DRAM Timing High Register
287 * [ 0: 0] Twtr (Write to Read Delay)
291 * [ 6: 4] Trwt (Read to Write Delay)
301 * [12: 8] Tref (Refresh Rate)
* 00000 = 100MHz 4K rows
* 00001 = 133MHz 4K rows
* 00010 = 166MHz 4K rows
* 00011 = 200MHz 4K rows
* 01000 = 100MHz 8K/16K rows
* 01001 = 133MHz 8K/16K rows
* 01010 = 166MHz 8K/16K rows
* 01011 = 200MHz 8K/16K rows
311 * [22:20] Twcl (Write CAS Latency)
312 * 000 = 1 Mem clock after CAS# (Unbuffered Dimms)
313 * 001 = 2 Mem clocks after CAS# (Registered Dimms)
316 PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
317 /* DRAM Config Low Register
319 * [ 0: 0] DLL Disable
* [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
329 * 0 = Enable DQS input filter
330 * 1 = Disable DQS input filtering
333 * 0 = Initialization done or not yet started.
* 1 = Initiate DRAM initialization sequence
335 * [ 9: 9] SO-Dimm Enable
337 * 1 = SO-Dimms present
339 * 0 = DRAM not enabled
340 * 1 = DRAM initialized and enabled
341 * [11:11] Memory Clear Status
342 * 0 = Memory Clear function has not completed
343 * 1 = Memory Clear function has completed
344 * [12:12] Exit Self-Refresh
345 * 0 = Exit from self-refresh done or not yet started
346 * 1 = DRAM exiting from self refresh
347 * [13:13] Self-Refresh Status
348 * 0 = Normal Operation
349 * 1 = Self-refresh mode active
350 * [15:14] Read/Write Queue Bypass Count
355 * [16:16] 128-bit/64-Bit
356 * 0 = 64bit Interface to DRAM
357 * 1 = 128bit Interface to DRAM
358 * [17:17] DIMM ECC Enable
359 * 0 = Some DIMMs do not have ECC
360 * 1 = ALL DIMMS have ECC bits
361 * [18:18] UnBuffered DIMMs
* 1 = Unbuffered DIMMs
364 * [19:19] Enable 32-Byte Granularity
365 * 0 = Optimize for 64byte bursts
366 * 1 = Optimize for 32byte bursts
367 * [20:20] DIMM 0 is x4
368 * [21:21] DIMM 1 is x4
369 * [22:22] DIMM 2 is x4
370 * [23:23] DIMM 3 is x4
372 * 1 = x4 DIMM present
373 * [24:24] Disable DRAM Receivers
374 * 0 = Receivers enabled
375 * 1 = Receivers disabled
* 000 = Arbiter's choice is always respected
378 * 001 = Oldest entry in DCQ can be bypassed 1 time
379 * 010 = Oldest entry in DCQ can be bypassed 2 times
380 * 011 = Oldest entry in DCQ can be bypassed 3 times
381 * 100 = Oldest entry in DCQ can be bypassed 4 times
382 * 101 = Oldest entry in DCQ can be bypassed 5 times
383 * 110 = Oldest entry in DCQ can be bypassed 6 times
384 * 111 = Oldest entry in DCQ can be bypassed 7 times
387 PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
389 (0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
390 (1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
391 (2 << 14)|(0 << 13)|(0 << 12)|
392 (0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
393 (0 << 3) |(0 << 1) |(0 << 0),
394 /* DRAM Config High Register
* [ 3: 0] Maximum Asynchronous Latency
401 * [11: 8] Read Preamble
419 * [18:16] Idle Cycle Limit
428 * [19:19] Dynamic Idle Cycle Center Enable
429 * 0 = Use Idle Cycle Limit
430 * 1 = Generate a dynamic Idle cycle limit
431 * [22:20] DRAM MEMCLK Frequency
441 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
442 * 0 = Disable MemClks
444 * [26:26] Memory Clock 0 Enable
447 * [27:27] Memory Clock 1 Enable
450 * [28:28] Memory Clock 2 Enable
453 * [29:29] Memory Clock 3 Enable
458 PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
459 (0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
460 (0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
461 /* DRAM Delay Line Register
463 * Adjust the skew of the input DQS strobe relative to DATA
465 * [23:16] Delay Line Adjust
466 * Adjusts the DLL derived PDL delay by one or more delay stages
467 * in either the faster or slower direction.
* [24:24] Adjust Slower
470 * 1 = Adj is used to increase the PDL delay
471 * [25:25] Adjust Faster
473 * 1 = Adj is used to decrease the PDL delay
476 PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
477 /* DRAM Scrub Control Register
* [ 4: 0] DRAM Scrub Rate
481 * [12: 8] L2 Scrub Rate
483 * [20:16] Dcache Scrub
486 * 00000 = Do not scrub
508 * All Others = Reserved
510 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
511 /* DRAM Scrub Address Low Register
513 * [ 0: 0] DRAM Scrubber Redirect Enable
515 * 1 = Scrubber Corrects errors found in normal operation
517 * [31: 6] DRAM Scrub Address 31-6
519 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
520 /* DRAM Scrub Address High Register
* [ 7: 0] DRAM Scrub Address 39-32
525 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
531 if (!controller_present(ctrl)) {
532 // print_debug("No memory controller present\r\n");
536 print_spew("setting up CPU");
537 print_spew_hex8(ctrl->node_id);
538 print_spew(" northbridge registers\r\n");
539 max = sizeof(register_values)/sizeof(register_values[0]);
540 for(i = 0; i < max; i += 3) {
printk_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
548 print_spew_hex32(register_values[i]);
550 print_spew_hex32(register_values[i+2]);
554 dev = (register_values[i] & ~0xff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
555 where = register_values[i] & 0xff;
556 reg = pci_read_config32(dev, where);
557 reg &= register_values[i+1];
558 reg |= register_values[i+2];
559 pci_write_config32(dev, where, reg);
562 reg = pci_read_config32(register_values[i]);
563 reg &= register_values[i+1];
564 reg |= register_values[i+2];
565 pci_write_config32(register_values[i], reg);
568 print_spew("done.\r\n");
572 static void hw_enable_ecc(const struct mem_controller *ctrl)
575 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
576 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
577 dcl &= ~DCL_DimmEccEn;
578 if (nbcap & NBCAP_ECC) {
579 dcl |= DCL_DimmEccEn;
581 if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
582 dcl &= ~DCL_DimmEccEn;
584 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
588 static int is_dual_channel(const struct mem_controller *ctrl)
591 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
592 return dcl & DCL_128BitEn;
595 static int is_opteron(const struct mem_controller *ctrl)
597 /* Test to see if I am an Opteron.
598 * FIXME Testing dual channel capability is correct for now
* but a better test is probably required.
601 #warning "FIXME implement a better test for opterons"
603 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
604 return !!(nbcap & NBCAP_128Bit);
607 static int is_registered(const struct mem_controller *ctrl)
609 /* Test to see if we are dealing with registered SDRAM.
610 * If we are not registered we are unbuffered.
611 * This function must be called after spd_handle_unbuffered_dimms.
614 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
615 return !(dcl & DCL_UnBufDimm);
623 #if K8_4RANK_DIMM_SUPPORT == 1
628 static struct dimm_size spd_get_dimm_size(unsigned device)
630 /* Calculate the log base 2 size of a DIMM in bits */
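/* Summing the SPD row and column address counts with log2 of the bank
 * count and log2 of the data width yields log2 of the capacity in bits,
 * since capacity = 2^rows * 2^cols * banks * width.
 */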
637 #if K8_4RANK_DIMM_SUPPORT == 1
/* Note: it might be easier to use byte 31 here, since it has the DIMM
* size as a multiple of 4MB. The way we do it now, we can size both
* sides of an asymmetric DIMM.
645 value = spd_read_byte(device, 3); /* rows */
646 if (value < 0) goto hw_err;
647 if ((value & 0xf) == 0) goto val_err;
648 sz.side1 += value & 0xf;
649 sz.rows = value & 0xf;
651 value = spd_read_byte(device, 4); /* columns */
652 if (value < 0) goto hw_err;
653 if ((value & 0xf) == 0) goto val_err;
654 sz.side1 += value & 0xf;
655 sz.col = value & 0xf;
657 value = spd_read_byte(device, 17); /* banks */
658 if (value < 0) goto hw_err;
659 if ((value & 0xff) == 0) goto val_err;
660 sz.side1 += log2(value & 0xff);
662 /* Get the module data width and convert it to a power of two */
663 value = spd_read_byte(device, 7); /* (high byte) */
664 if (value < 0) goto hw_err;
668 low = spd_read_byte(device, 6); /* (low byte) */
669 if (low < 0) goto hw_err;
670 value = value | (low & 0xff);
671 if ((value != 72) && (value != 64)) goto val_err;
672 sz.side1 += log2(value);
675 value = spd_read_byte(device, 5); /* number of physical banks */
676 if (value < 0) goto hw_err;
677 if (value == 1) goto out;
678 if ((value != 2) && (value != 4 )) {
681 #if K8_4RANK_DIMM_SUPPORT == 1
685 /* Start with the symmetrical case */
688 value = spd_read_byte(device, 3); /* rows */
689 if (value < 0) goto hw_err;
690 if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
691 sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
692 sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */
694 value = spd_read_byte(device, 4); /* columns */
695 if (value < 0) goto hw_err;
696 if ((value & 0xff) == 0) goto val_err;
697 sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
sz.side2 += ((value >> 4) & 0x0f); /* Add in columns on side 2 */
703 die("Bad SPD value\r\n");
/* If a hardware error occurs, report that I have no memory */
710 #if K8_4RANK_DIMM_SUPPORT == 1
718 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
720 uint32_t base0, base1;
723 if (sz.side1 != sz.side2) {
727 /* For each base register.
728 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
* The initial DIMM size is in bits.
730 * Set the base enable bit0.
735 /* Make certain side1 of the dimm is at least 32MB */
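/* sz.side1 is log2 of the side size in bits: subtracting 3 converts to
 * bytes and 25 more accounts for the 32MB granularity, so the size lands
 * in bits [31:21] of the CS base register, with bit 0 as the enable.
 */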
if (sz.side1 >= (25 + 3)) {
737 base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
740 /* Make certain side2 of the dimm is at least 32MB */
741 if (sz.side2 >= (25 + 3)) {
742 base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
745 /* Double the size if we are using dual channel memory */
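/* Shifting left doubles the size encoded in bits [31:21]; OR-ing bit 0
 * back in preserves the chip select enable.
 */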
746 if (is_dual_channel(ctrl)) {
747 base0 = (base0 << 1) | (base0 & 1);
748 base1 = (base1 << 1) | (base1 & 1);
751 /* Clear the reserved bits */
752 base0 &= ~0x001ffffe;
753 base1 &= ~0x001ffffe;
755 /* Set the appropriate DIMM base address register */
756 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
757 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
758 #if K8_4RANK_DIMM_SUPPORT == 1
760 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
761 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
765 /* Enable the memory clocks for this DIMM */
767 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
768 dch |= DCH_MEMCLK_EN0 << index;
769 #if K8_4RANK_DIMM_SUPPORT == 1
771 dch |= DCH_MEMCLK_EN0 << (index + 2);
774 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
778 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
780 static const unsigned cs_map_aa[] = {
781 /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
790 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
791 map &= ~(0xf << (index * 4));
792 #if K8_4RANK_DIMM_SUPPORT == 1
794 map &= ~(0xf << ( (index + 2) * 4));
799 /* Make certain side1 of the dimm is at least 32MB */
if (sz.side1 >= (25 + 3)) {
801 if(is_cpu_pre_d0()) {
802 map |= (sz.side1 - (25 + 3)) << (index *4);
803 #if K8_4RANK_DIMM_SUPPORT == 1
805 map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
810 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
811 #if K8_4RANK_DIMM_SUPPORT == 1
813 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
819 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
823 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
827 for(i = 0; i < DIMM_SOCKETS; i++) {
829 if (!(dimm_mask & (1 << i))) {
832 sz = spd_get_dimm_size(ctrl->channel0[i]);
834 return -1; /* Report SPD error */
836 set_dimm_size(ctrl, sz, i);
837 set_dimm_map (ctrl, sz, i);
842 static void route_dram_accesses(const struct mem_controller *ctrl,
843 unsigned long base_k, unsigned long limit_k)
845 /* Route the addresses to the controller node */
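/* The DRAM base/limit registers hold address bits [39:24] in bits
 * [31:16], so a value in KB shifted left by 2 lands in the right field.
 */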
850 unsigned limit_reg, base_reg;
853 node_id = ctrl->node_id;
854 index = (node_id << 3);
855 limit = (limit_k << 2);
858 limit |= ( 0 << 8) | (node_id << 0);
859 base = (base_k << 2);
861 base |= (0 << 8) | (1<<1) | (1<<0);
863 limit_reg = 0x44 + index;
864 base_reg = 0x40 + index;
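/* Write the routing entry into function 1 of every node so that all
 * nodes agree on which node owns this address range.
 */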
865 for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
866 pci_write_config32(device, limit_reg, limit);
867 pci_write_config32(device, base_reg, base);
871 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
873 /* Error if I don't have memory */
878 /* Report the amount of memory. */
879 print_spew("RAM: 0x");
880 print_spew_hex32(tom_k);
881 print_spew(" KB\r\n");
883 /* Now set top of memory */
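/* tom_k is in KB; the MSR takes a byte address, so the low 22 bits of
 * tom_k shifted up by 10 fill the low dword and the remaining bits
 * (address bits [39:32]) fill the high dword.
 */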
885 msr.lo = (tom_k & 0x003fffff) << 10;
886 msr.hi = (tom_k & 0xffc00000) >> 22;
887 wrmsr(TOP_MEM2, msr);
889 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
890 * so I can see my rom chip and other I/O devices.
892 if (tom_k >= 0x003f0000) {
893 #if K8_HW_MEM_HOLE_SIZEK != 0
894 if(hole_startk != 0) {
900 msr.lo = (tom_k & 0x003fffff) << 10;
901 msr.hi = (tom_k & 0xffc00000) >> 22;
905 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
908 static const uint8_t csbase_low_shift[] = {
911 /* 128MB */ (14 - 4),
912 /* 256MB */ (15 - 4),
913 /* 512MB */ (15 - 4),
918 static const uint8_t csbase_low_d0_shift[] = {
921 /* 128MB */ (14 - 4),
922 /* 128MB */ (15 - 4),
923 /* 256MB */ (15 - 4),
924 /* 512MB */ (15 - 4),
925 /* 256MB */ (16 - 4),
926 /* 512MB */ (16 - 4),
932 /* cs_base_high is not changed */
935 int chip_selects, index;
937 unsigned common_size;
938 unsigned common_cs_mode;
939 uint32_t csbase, csmask;
941 /* See if all of the memory chip selects are the same size
942 * and if so count them.
947 for(index = 0; index < 8; index++) {
952 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
960 if (common_size == 0) {
/* The sizes differ, fail */
964 if (common_size != size) {
968 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
969 cs_mode =( value >> ((index>>1)*4)) & 0xf;
970 if(cs_mode == 0 ) continue;
971 if(common_cs_mode == 0) {
972 common_cs_mode = cs_mode;
/* The cs_modes differ, fail */
975 if(common_cs_mode != cs_mode) {
980 /* Chip selects can only be interleaved when there is
* more than one and there is a power of two of them.
983 bits = log2(chip_selects);
984 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
988 /* Find the bits of csbase that we need to interleave on */
990 csbase_inc = 1 << csbase_low_shift[common_cs_mode];
991 if(is_dual_channel(ctrl)) {
/* Also, we run out of address mask bits if we try to interleave eight 4GB DIMMs */
993 if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
994 // print_debug("8 4GB chip selects cannot be interleaved\r\n");
1001 csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
1002 if(is_dual_channel(ctrl)) {
1003 if( (bits==3) && (common_cs_mode > 8)) {
1004 // print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
/* Compute the initial values for csbase and csmask.
1012 * In csbase just set the enable bit and the base to zero.
1013 * In csmask set the mask bits for the size and page level interleave.
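* Bits [29:21] mask the size of the interleaved region, and in the low
* field [15:9] every bit is masked except the csbase_inc bits used for
* the interleave, which must still compare.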
1016 csmask = (((common_size << bits) - 1) << 21);
1017 csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
1018 for(index = 0; index < 8; index++) {
1021 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1022 /* Is it enabled? */
1026 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1027 pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
1028 csbase += csbase_inc;
1031 print_spew("Interleaved\r\n");
1033 /* Return the memory size in K */
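/* common_size is in 32MB (1 << 15 KB) units and 2^bits chip selects are
 * interleaved, hence the shift by (15 + bits).
 */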
1034 return common_size << (15 + bits);
1037 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1041 /* Remember which registers we have used in the high 8 bits of tom */
/* Find the largest remaining candidate */
1045 unsigned index, canidate;
1046 uint32_t csbase, csmask;
1050 for(index = 0; index < 8; index++) {
1052 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1054 /* Is it enabled? */
1059 /* Is it greater? */
1060 if (value <= csbase) {
1064 /* Has it already been selected */
1065 if (tom & (1 << (index + 24))) {
/* I have a new candidate */
/* See if I have found a new candidate */
1077 /* Remember the dimm size */
1078 size = csbase >> 21;
1080 /* Remember I have used this register */
1081 tom |= (1 << (canidate + 24));
1083 /* Recompute the cs base register value */
1084 csbase = (tom << 21) | 1;
1086 /* Increment the top of memory */
1089 /* Compute the memory mask */
1090 csmask = ((size -1) << 21);
1091 csmask |= 0xfe00; /* For now don't optimize */
1093 /* Write the new base register */
1094 pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1095 /* Write the new mask register */
1096 pci_write_config32(ctrl->f2, DRAM_CSMASK + (canidate << 2), csmask);
1099 /* Return the memory size in K */
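/* The low 24 bits of tom count 32MB blocks; the high 8 bits only track
 * which chip select registers were used, so mask them off before
 * converting to KB.
 */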
1100 return (tom & ~0xff000000) << 15;
1103 unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1107 /* Find the last memory address used */
1109 for(node_id = 0; node_id < max_node_id; node_id++) {
1110 uint32_t limit, base;
1112 index = node_id << 3;
1113 base = pci_read_config32(ctrl->f1, 0x40 + index);
1114 /* Only look at the limit if the base is enabled */
1115 if ((base & 3) == 3) {
1116 limit = pci_read_config32(ctrl->f1, 0x44 + index);
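/* The limit register holds address bits [39:24] in bits [31:16]:
 * adding 0x00010000 makes the limit exclusive, and >> 2 converts the
 * raw field to KB.
 */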
1117 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
1123 static void order_dimms(const struct mem_controller *ctrl)
1125 unsigned long tom_k, base_k;
1127 if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1128 tom_k = interleave_chip_selects(ctrl);
1130 print_debug("Interleaving disabled\r\n");
1134 tom_k = order_chip_selects(ctrl);
1136 /* Compute the memory base address */
1137 base_k = memory_end_k(ctrl, ctrl->node_id);
1139 route_dram_accesses(ctrl, base_k, tom_k);
1140 set_top_mem(tom_k, 0);
1143 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1145 print_debug("disabling dimm");
1146 print_debug_hex8(index);
1147 print_debug("\r\n");
1148 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1149 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1150 dimm_mask &= ~(1 << index);
1154 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, long dimm_mask)
1162 for(i = 0; (i < DIMM_SOCKETS); i++) {
1164 if (!(dimm_mask & (1 << i))) {
1167 value = spd_read_byte(ctrl->channel0[i], 21);
1171 /* Registered dimm ? */
1172 if (value & (1 << 1)) {
1175 /* Otherwise it must be an unbuffered dimm */
1180 if (unbuffered && registered) {
1181 die("Mixed buffered and registered dimms not supported");
// By yhlu, for debugging: Athlon64 socket 939 can do dual channel, but it uses unbuffered DIMMs
1185 if (unbuffered && is_opteron(ctrl)) {
1186 die("Unbuffered Dimms not supported on Opteron");
1190 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1191 dcl &= ~DCL_UnBufDimm;
1193 dcl |= DCL_UnBufDimm;
1195 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1197 if (is_registered(ctrl)) {
1198 print_debug("Registered\r\n");
1200 print_debug("Unbuffered\r\n");
1206 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1211 for(i = 0; i < DIMM_SOCKETS; i++) {
1214 device = ctrl->channel0[i];
1216 byte = spd_read_byte(ctrl->channel0[i], 2); /* Type */
1218 dimm_mask |= (1 << i);
1221 device = ctrl->channel1[i];
1223 byte = spd_read_byte(ctrl->channel1[i], 2);
1225 dimm_mask |= (1 << (i + DIMM_SOCKETS));
1232 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
/* SPD bytes that must be identical on both channels */
1237 static const uint8_t addresses[] = {
1238 2, /* Type should be DDR SDRAM */
1239 3, /* *Row addresses */
1240 4, /* *Column addresses */
1241 5, /* *Physical Banks */
1242 6, /* *Module Data Width low */
1243 7, /* *Module Data Width high */
1244 9, /* *Cycle time at highest CAS Latency CL=X */
1245 11, /* *SDRAM Type */
1246 13, /* *SDRAM Width */
1247 17, /* *Logical Banks */
1248 18, /* *Supported CAS Latencies */
1249 21, /* *SDRAM Module Attributes */
23, /* *Cycle time at CAS Latency (CLX - 0.5) */
26, /* *Cycle time at CAS Latency (CLX - 1.0) */
1252 27, /* *tRP Row precharge time */
1253 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1254 29, /* *tRCD RAS to CAS */
1255 30, /* *tRAS Activate to Precharge */
1256 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1257 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1259 /* If the dimms are not in pairs do not do dual channels */
1260 if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1261 ((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1262 goto single_channel;
1264 /* If the cpu is not capable of doing dual channels don't do dual channels */
1265 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1266 if (!(nbcap & NBCAP_128Bit)) {
1267 goto single_channel;
1269 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1270 unsigned device0, device1;
1273 /* If I don't have a dimm skip this one */
1274 if (!(dimm_mask & (1 << i))) {
1277 device0 = ctrl->channel0[i];
1278 device1 = ctrl->channel1[i];
1279 for(j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
1281 addr = addresses[j];
1282 value0 = spd_read_byte(device0, addr);
1286 value1 = spd_read_byte(device1, addr);
1290 if (value0 != value1) {
1291 goto single_channel;
1295 print_spew("Enabling dual channel memory\r\n");
1297 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1298 dcl &= ~DCL_32ByteEn;
1299 dcl |= DCL_128BitEn;
1300 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
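/* On the single channel path, drop the channel 1 DIMMs from the mask. */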
1303 dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
1309 uint8_t divisor; /* In 1/2 ns increments */
1312 uint32_t dch_memclk;
1313 uint16_t dch_tref4k, dch_tref8k;
1318 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1320 static const struct mem_param speed[] = {
1322 .name = "100Mhz\r\n",
1324 .divisor = (10 <<1),
1327 .dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1328 .dch_tref4k = DTH_TREF_100MHZ_4K,
1329 .dch_tref8k = DTH_TREF_100MHZ_8K,
1333 .name = "133Mhz\r\n",
1335 .divisor = (7<<1)+1,
1338 .dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1339 .dch_tref4k = DTH_TREF_133MHZ_4K,
1340 .dch_tref8k = DTH_TREF_133MHZ_8K,
1344 .name = "166Mhz\r\n",
1349 .dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1350 .dch_tref4k = DTH_TREF_166MHZ_4K,
1351 .dch_tref8k = DTH_TREF_166MHZ_8K,
1355 .name = "200Mhz\r\n",
1360 .dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1361 .dch_tref4k = DTH_TREF_200MHZ_4K,
1362 .dch_tref8k = DTH_TREF_200MHZ_8K,
1369 const struct mem_param *param;
1370 for(param = &speed[0]; param->cycle_time ; param++) {
1371 if (min_cycle_time > (param+1)->cycle_time) {
1375 if (!param->cycle_time) {
1376 die("min_cycle_time to low");
1378 print_spew(param->name);
1379 #ifdef DRAM_MIN_CYCLE_TIME
1380 print_debug(param->name);
1385 struct spd_set_memclk_result {
1386 const struct mem_param *param;
1389 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1391 /* Compute the minimum cycle time for these dimms */
1392 struct spd_set_memclk_result result;
1393 unsigned min_cycle_time, min_latency, bios_cycle_time;
static const uint8_t latency_indices[] = { 26, 23, 9 };
1398 static const unsigned char min_cycle_times[] = {
1399 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1400 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1401 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1402 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
1406 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1407 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1408 bios_cycle_time = min_cycle_times[
1409 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1410 if (bios_cycle_time > min_cycle_time) {
1411 min_cycle_time = bios_cycle_time;
1415 /* Compute the least latency with the fastest clock supported
1416 * by both the memory controller and the dimms.
1418 for(i = 0; i < DIMM_SOCKETS; i++) {
1419 int new_cycle_time, new_latency;
1424 if (!(dimm_mask & (1 << i))) {
1428 /* First find the supported CAS latencies
1429 * Byte 18 for DDR SDRAM is interpreted:
1430 * bit 0 == CAS Latency = 1.0
1431 * bit 1 == CAS Latency = 1.5
1432 * bit 2 == CAS Latency = 2.0
1433 * bit 3 == CAS Latency = 2.5
1434 * bit 4 == CAS Latency = 3.0
1435 * bit 5 == CAS Latency = 3.5
1439 new_cycle_time = 0xa0;
1442 latencies = spd_read_byte(ctrl->channel0[i], 18);
1443 if (latencies <= 0) continue;
1445 /* Compute the lowest cas latency supported */
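/* Byte 18 bit n means CAS latency 1.0 + n/2 and log2() returns the
 * highest set bit, so we start two half-steps below the maximum CL;
 * bytes 26, 23 and 9 then give the cycle times at CL-1.0, CL-0.5 and
 * CL respectively.
 */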
1446 latency = log2(latencies) -2;
1448 /* Loop through and find a fast clock with a low latency */
1449 for(index = 0; index < 3; index++, latency++) {
1451 if ((latency < 2) || (latency > 4) ||
1452 (!(latencies & (1 << latency)))) {
value = spd_read_byte(ctrl->channel0[i], latency_indices[index]);
/* Only increase the latency if we decrease the clock */
1461 if ((value >= min_cycle_time) && (value < new_cycle_time)) {
1462 new_cycle_time = value;
1463 new_latency = latency;
1466 if (new_latency > 4){
/* Does min_cycle_time need to be increased? */
1470 if (new_cycle_time > min_cycle_time) {
1471 min_cycle_time = new_cycle_time;
/* Does min_latency need to be increased? */
1474 if (new_latency > min_latency) {
1475 min_latency = new_latency;
1478 /* Make a second pass through the dimms and disable
1479 * any that cannot support the selected memclk and cas latency.
1482 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1487 if (!(dimm_mask & (1 << i))) {
1490 latencies = spd_read_byte(ctrl->channel0[i], 18);
1491 if (latencies < 0) goto hw_error;
1492 if (latencies == 0) {
1496 /* Compute the lowest cas latency supported */
1497 latency = log2(latencies) -2;
1499 /* Walk through searching for the selected latency */
1500 for(index = 0; index < 3; index++, latency++) {
1501 if (!(latencies & (1 << latency))) {
1504 if (latency == min_latency)
/* If I can't find the latency or my index is bad, error out */
1508 if ((latency != min_latency) || (index >= 3)) {
1512 /* Read the min_cycle_time for this latency */
value = spd_read_byte(ctrl->channel0[i], latency_indices[index]);
1514 if (value < 0) goto hw_error;
1516 /* All is good if the selected clock speed
1517 * is what I need or slower.
1519 if (value <= min_cycle_time) {
1522 /* Otherwise I have an error, disable the dimm */
1524 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
// Reduce the memory speed when fully loaded with 4-rank DIMMs
1528 #if K8_4RANK_DIMM_SUPPORT
1529 if(dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1531 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1533 if (!(dimm_mask & (1 << i))) {
1536 val = spd_read_byte(ctrl->channel0[i], 5);
1543 if(min_cycle_time <= 0x50 ) {
1544 min_cycle_time = 0x60;
/* Now that I know the minimum cycle time, look up the memory parameters */
1552 result.param = get_mem_param(min_cycle_time);
1554 /* Update DRAM Config High with our selected memory speed */
1555 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1556 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
/* Improve DQS centering by correcting for the case where the core speed
 * multiplier and the MEMCLK speed result in an odd clock divisor: select
 * the next lowest memory speed. Required only at DDR400 and higher
 * speeds with certain DIMM loadings. (Cheating?)
 */
1559 if(!is_cpu_pre_e0()) {
1560 if(min_cycle_time==0x50) {
1566 value |= result.param->dch_memclk;
1567 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1569 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1570 /* Update DRAM Timing Low with our selected cas latency */
1571 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1572 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1573 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1574 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1576 result.dimm_mask = dimm_mask;
1579 result.param = (const struct mem_param *)0;
1580 result.dimm_mask = -1;
1585 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1587 unsigned clocks, old_clocks;
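/* SPD byte 41 gives Trc in ns and the divisor is in 1/2ns units, so the
 * conversion below doubles the value and rounds the division up.
 */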
1590 value = spd_read_byte(ctrl->channel0[i], 41);
1591 if (value < 0) return -1;
1592 if ((value == 0) || (value == 0xff)) {
1595 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1596 if (clocks < DTL_TRC_MIN) {
1597 clocks = DTL_TRC_MIN;
1599 if (clocks > DTL_TRC_MAX) {
1603 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1604 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1605 if (old_clocks > clocks) {
1606 clocks = old_clocks;
1608 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1609 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1610 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1614 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1616 unsigned clocks, old_clocks;
1619 value = spd_read_byte(ctrl->channel0[i], 42);
1620 if (value < 0) return -1;
1621 if ((value == 0) || (value == 0xff)) {
1622 value = param->tRFC;
1624 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1625 if (clocks < DTL_TRFC_MIN) {
1626 clocks = DTL_TRFC_MIN;
1628 if (clocks > DTL_TRFC_MAX) {
1631 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1632 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
1633 if (old_clocks > clocks) {
1634 clocks = old_clocks;
1636 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1637 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1638 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1643 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1645 unsigned clocks, old_clocks;
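/* SPD byte 29 gives Trcd in 1/4ns units; doubling the half-ns divisor
 * matches those units, again rounding the division up.
 */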
1648 value = spd_read_byte(ctrl->channel0[i], 29);
1649 if (value < 0) return -1;
1650 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1651 if (clocks < DTL_TRCD_MIN) {
1652 clocks = DTL_TRCD_MIN;
1654 if (clocks > DTL_TRCD_MAX) {
1657 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1658 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
1659 if (old_clocks > clocks) {
1660 clocks = old_clocks;
1662 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1663 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1664 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1668 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1670 unsigned clocks, old_clocks;
1673 value = spd_read_byte(ctrl->channel0[i], 28);
1674 if (value < 0) return -1;
1675 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1676 if (clocks < DTL_TRRD_MIN) {
1677 clocks = DTL_TRRD_MIN;
1679 if (clocks > DTL_TRRD_MAX) {
1682 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1683 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
1684 if (old_clocks > clocks) {
1685 clocks = old_clocks;
1687 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1688 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1689 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1693 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1695 unsigned clocks, old_clocks;
1698 value = spd_read_byte(ctrl->channel0[i], 30);
1699 if (value < 0) return -1;
1700 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1701 if (clocks < DTL_TRAS_MIN) {
1702 clocks = DTL_TRAS_MIN;
1704 if (clocks > DTL_TRAS_MAX) {
1707 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1708 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1709 if (old_clocks > clocks) {
1710 clocks = old_clocks;
1712 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1713 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1714 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1718 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1720 unsigned clocks, old_clocks;
1723 value = spd_read_byte(ctrl->channel0[i], 27);
1724 if (value < 0) return -1;
1725 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1726 if (clocks < DTL_TRP_MIN) {
1727 clocks = DTL_TRP_MIN;
1729 if (clocks > DTL_TRP_MAX) {
1732 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1733 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
1734 if (old_clocks > clocks) {
1735 clocks = old_clocks;
1737 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1738 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1739 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1743 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1746 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1747 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1748 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1749 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1753 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1756 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1757 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1758 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1759 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1762 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1766 unsigned tref, old_tref;
1767 value = spd_read_byte(ctrl->channel0[i], 3);
1768 if (value < 0) return -1;
1771 tref = param->dch_tref8k;
1773 tref = param->dch_tref4k;
1776 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1777 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
1778 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1779 tref = param->dch_tref4k;
1781 tref = param->dch_tref8k;
1783 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1784 dth |= (tref << DTH_TREF_SHIFT);
1785 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1790 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1794 #if K8_4RANK_DIMM_SUPPORT == 1
1798 value = spd_read_byte(ctrl->channel0[i], 13);
1803 #if K8_4RANK_DIMM_SUPPORT == 1
1804 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
1810 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1811 #if K8_4RANK_DIMM_SUPPORT == 1
1813 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1816 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1821 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1825 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1829 value = spd_read_byte(ctrl->channel0[i], 11);
1834 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1835 dcl &= ~DCL_DimmEccEn;
1836 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1841 static int count_dimms(const struct mem_controller *ctrl)
1846 for(index = 0; index < 8; index += 2) {
1848 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
1856 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1860 clocks = 1; /* AMD says hard code this */
1861 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1862 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1863 dth |= ((clocks - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1864 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1867 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
1875 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1876 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1877 divisor = param->divisor;
1879 if (is_opteron(ctrl)) {
1880 if (latency == DTL_CL_2) {
1881 if (divisor == ((6 << 0) + 0)) {
1885 else if (divisor > ((6 << 0)+0)) {
/* 100MHz && 133MHz */
1890 else if (latency == DTL_CL_2_5) {
1893 else if (latency == DTL_CL_3) {
1894 if (divisor == ((6 << 0)+0)) {
1898 else if (divisor > ((6 << 0)+0)) {
/* 100MHz && 133MHz */
1904 else /* Athlon64 */ {
1905 if (is_registered(ctrl)) {
1906 if (latency == DTL_CL_2) {
1909 else if (latency == DTL_CL_2_5) {
1912 else if (latency == DTL_CL_3) {
1916 else /* Unbuffered */{
1917 if (latency == DTL_CL_2) {
1920 else if (latency == DTL_CL_2_5) {
1923 else if (latency == DTL_CL_3) {
1928 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1929 die("Unknown Trwt\r\n");
1932 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1933 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1934 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1935 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1939 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1941 /* Memory Clocks after CAS# */
1944 if (is_registered(ctrl)) {
1949 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1950 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1951 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1952 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
1956 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1960 unsigned rdpreamble;
1961 divisor = param->divisor;
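/* The divisor is in 1/2ns units: 20 is 10ns (100MHz), 15 is 7.5ns
 * (133MHz), 12 is 6ns (166MHz) and 10 is 5ns (200MHz). The rdpreamble
 * values below are in the same half-ns units.
 */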
1962 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1963 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
1965 if (is_registered(ctrl)) {
1966 if (divisor == ((10 << 1)+0)) {
1968 rdpreamble = ((9 << 1)+ 0);
1970 else if (divisor == ((7 << 1)+1)) {
1972 rdpreamble = ((8 << 1)+0);
1974 else if (divisor == ((6 << 1)+0)) {
1976 rdpreamble = ((7 << 1)+1);
1978 else if (divisor == ((5 << 1)+0)) {
1980 rdpreamble = ((7 << 1)+0);
1987 for(i = 0; i < 4; i++) {
1988 if (ctrl->channel0[i]) {
1992 if (divisor == ((10 << 1)+0)) {
1996 rdpreamble = ((9 << 1)+0);
1999 rdpreamble = ((14 << 1)+0);
2002 else if (divisor == ((7 << 1)+1)) {
2006 rdpreamble = ((7 << 1)+0);
2009 rdpreamble = ((11 << 1)+0);
2012 else if (divisor == ((6 << 1)+0)) {
2016 rdpreamble = ((7 << 1)+0);
2019 rdpreamble = ((9 << 1)+0);
2022 else if (divisor == ((5 << 1)+0)) {
2026 rdpreamble = ((5 << 1)+0);
2029 rdpreamble = ((7 << 1)+0);
2033 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
2034 die("Unknown rdpreamble");
2036 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
2037 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2040 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2046 dimms = count_dimms(ctrl);
2048 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2049 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
2051 if (is_registered(ctrl)) {
2063 die("Too many unbuffered dimms");
2065 else if (dimms == 3) {
2074 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2075 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2078 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2081 /* AMD says to Hardcode this */
2082 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2083 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2084 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2085 dch |= DCH_DYN_IDLE_CTR_EN;
2086 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2089 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2093 init_Tref(ctrl, param);
2094 for(i = 0; i < DIMM_SOCKETS; i++) {
2096 if (!(dimm_mask & (1 << i))) {
2099 /* DRAM Timing Low Register */
2100 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2101 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2102 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2103 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2104 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2105 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2107 /* DRAM Timing High Register */
2108 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2111 /* DRAM Config Low */
2112 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2113 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
2119 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2121 /* DRAM Timing Low Register */
2122 set_Twr(ctrl, param);
2124 /* DRAM Timing High Register */
2125 set_Twtr(ctrl, param);
2126 set_Trwt(ctrl, param);
2127 set_Twcl(ctrl, param);
2129 /* DRAM Config High */
2130 set_read_preamble(ctrl, param);
2131 set_max_async_latency(ctrl, param);
2132 set_idle_cycle_limit(ctrl, param);
2136 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2138 struct spd_set_memclk_result result;
2139 const struct mem_param *param;
2142 if (!controller_present(ctrl)) {
2143 // print_debug("No memory controller present\r\n");
2147 hw_enable_ecc(ctrl);
2148 activate_spd_rom(ctrl);
2149 dimm_mask = spd_detect_dimms(ctrl);
2150 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2151 print_debug("No memory for this cpu\r\n");
2154 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2157 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2160 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
2163 result = spd_set_memclk(ctrl, dimm_mask);
2164 param = result.param;
2165 dimm_mask = result.dimm_mask;
2168 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2174 /* Unrecoverable error reading SPD data */
2175 print_err("SPD error - reset\r\n");
2180 #if K8_HW_MEM_HOLE_SIZEK != 0
2181 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2184 uint32_t carry_over;
2186 uint32_t base, limit;
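/* Everything here is in KB: 4*1024*1024 KB is the 4GB boundary, so
 * carry_over is the amount of memory the hole displaces above 4GB.
 */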
2191 carry_over = (4*1024*1024) - hole_startk;
2193 for(ii=controllers - 1;ii>i;ii--) {
2194 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2195 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2198 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
2199 for(j = 0; j < controllers; j++) {
2200 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2201 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
2204 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2205 for(j = 0; j < controllers; j++) {
2206 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2209 base = pci_read_config32(dev, 0x40 + (i << 3));
2210 basek = (base & 0xffff0000) >> 2;
2211 if(basek == hole_startk) {
// No need to set the memory hole here: the hole offset would be 0 (it overflows),
// so change the base register instead; the new basek will be 4*1024*1024.
2215 base |= (4*1024*1024)<<2;
2216 for(j = 0; j < controllers; j++) {
2217 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
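/* Function 1 register 0xf0 is the DRAM Hole Address Register: bits
 * [31:24] hold the hole base, bits [15:8] hold the offset (address
 * bits [31:24]) applied to accesses above the hole, and bit 0 is the
 * enable.
 */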
2221 hoist = /* hole start address */
2222 ((hole_startk << 10) & 0xff000000) +
2223 /* hole address to memory controller address */
2224 (((basek + carry_over) >> 6) & 0x0000ff00) +
2227 pci_write_config32(dev, 0xf0, hoist);
2233 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2236 uint32_t hole_startk;
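/* The hole sits directly below the 4GB boundary, so it starts at 4GB
 * (in KB) minus the configured hole size.
 */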
2239 hole_startk = 4*1024*1024 - K8_HW_MEM_HOLE_SIZEK;
2241 #if K8_HW_MEM_HOLE_SIZE_AUTO_INC == 1
// Double-check that hole_startk is valid: if it equals a node's basek, decrease it a bit
2244 for(i=0; i<controllers; i++) {
2247 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2248 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2251 base_k = (base & 0xffff0000) >> 2;
2252 if(base_k == hole_startk) {
hole_startk -= (base_k - basek_pri)>>1; // decrease hole_startk so it lands in the middle of the previous node
2254 break; //only one hole
// Find the node that the hole has to be set on
2261 for(i=0; i<controllers; i++) {
2262 uint32_t base, limit;
2263 unsigned base_k, limit_k;
2264 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2265 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2268 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2269 base_k = (base & 0xffff0000) >> 2;
2270 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2271 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2273 hoist_memory(controllers, ctrl, hole_startk, i);
2274 end_k = memory_end_k(ctrl, controllers);
2275 set_top_mem(end_k, hole_startk);
2276 break; //only one hole
2284 #define TIMEOUT_LOOPS 300000
2285 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2289 /* Error if I don't have memory */
2290 if (memory_end_k(ctrl, controllers) == 0) {
2291 die("No memory\r\n");
2294 /* Before enabling memory start the memory clocks */
2295 for(i = 0; i < controllers; i++) {
2297 if (!controller_present(ctrl + i))
2299 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2300 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2301 dch |= DCH_MEMCLK_VALID;
2302 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2305 /* Disable dram receivers */
2307 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2308 dcl |= DCL_DisInRcvrs;
2309 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
/* And if necessary toggle the reset on the dimms by hand */
2314 memreset(controllers, ctrl);
2316 for(i = 0; i < controllers; i++) {
2318 if (!controller_present(ctrl + i))
2320 /* Skip everything if I don't have any memory on this controller */
2321 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2322 if (!(dch & DCH_MEMCLK_VALID)) {
2326 /* Toggle DisDqsHys to get it working */
2327 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2328 if (dcl & DCL_DimmEccEn) {
2330 print_spew("ECC enabled\r\n");
2331 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
2333 if (dcl & DCL_128BitEn) {
2334 mnc |= MNC_CHIPKILL_EN;
2336 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2338 dcl |= DCL_DisDqsHys;
2339 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2340 dcl &= ~DCL_DisDqsHys;
2341 dcl &= ~DCL_DLL_Disable;
2344 dcl |= DCL_DramInit;
2345 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2348 for(i = 0; i < controllers; i++) {
2350 if (!controller_present(ctrl + i))
2352 /* Skip everything if I don't have any memory on this controller */
2353 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2354 if (!(dch & DCH_MEMCLK_VALID)) {
2358 print_debug("Initializing memory: ");
2362 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2364 if ((loops & 1023) == 0) {
2367 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2368 if (loops >= TIMEOUT_LOOPS) {
2369 print_debug(" failed\r\n");
2373 if (!is_cpu_pre_c0()) {
2374 /* Wait until it is safe to touch memory */
2375 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2376 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2378 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2379 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2382 print_debug(" done\r\n");
2385 #if K8_HW_MEM_HOLE_SIZEK != 0
2386 // init hw mem hole here
/* The DramHoleValid bit can only be set after MemClrStatus is set by hardware */
2388 if(!is_cpu_pre_e0())
2389 set_hw_mem_hole(controllers, ctrl);
//FIXME add node interleaving enable here -- yhlu
1. Check how many nodes we have; if not all have RAM installed, get out.
2. Check that cs_base lo is 0 on node 0 (f2 0x40...); if any node is not using lo as CS_BASE, get out.
3. Check that the other nodes match node 0 in f2 0x40...; otherwise get out.
4. If all are ready, enable node interleaving in f1 0x40... on every node.
5. For node interleaving we need to set the memory hole on every node (recalculate the hole offset in f0 for every node).
2401 #if USE_DCACHE_RAM == 0
/* Make certain the first 1M of memory is initialized */
2403 print_debug("Clearing initial memory region: ");
/* Use write combine caching while we set up the first 1M */
2406 cache_lbmem(MTRR_TYPE_WRCOMB);
2408 /* clear memory 1meg */
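/* CONFIG_LB_MEM_TOPK is in KB; << 10 converts it to a byte count. */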
2409 clear_memory((void *)0, CONFIG_LB_MEM_TOPK << 10);
/* The first 1M is now set up, use it */
2412 cache_lbmem(MTRR_TYPE_WRBACK);
2414 print_debug(" done\r\n");
2418 static int mem_inited(int controllers, const struct mem_controller *ctrl)
2423 unsigned mask_inited = 0;
2425 for(i = 0; i < controllers; i++) {
2427 if (!controller_present(ctrl + i))
2431 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2433 if (!is_cpu_pre_c0()) { // B3
2435 if( (dcl & DCL_MemClrStatus) && (dcl & DCL_DramEnable) ) {
2436 mask_inited |= (1<<i);
2441 if(mask == mask_inited) return 1;
2446 #if USE_DCACHE_RAM == 1
2447 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
2451 struct mem_controller *ctrl;
2452 for(i=0;i<controllers; i++) {
2455 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2456 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2457 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2458 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2460 if(spd_addr == (void *)0) continue;
2462 for(j=0;j<DIMM_SOCKETS;j++) {
2463 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2464 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];