1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
7 #include <cpu/x86/cache.h>
8 #include <cpu/x86/mtrr.h>
14 #if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0
15 # error "CONFIG_RAMTOP must be a power of 2"
18 #ifndef QRANK_DIMM_SUPPORT
19 #define QRANK_DIMM_SUPPORT 0
/* setup_resource_map():
 * Walk a flat table of (PCI config address, AND mask, OR value) triples and
 * read-modify-write each 32-bit config register: bits cleared in the AND
 * mask are preserved from the reset/current value, bits in the OR value are
 * forced on. The device/function is packed in the upper bits of the address
 * word, the register offset in the low 12 bits.
 * NOTE(review): listing is elided here — the declarations of i/dev/where/reg
 * and the braces are not visible in this excerpt. */
22 static void setup_resource_map(const unsigned int *register_values, int max)
25 	// printk(BIOS_DEBUG, "setting up resource map....");
26 	for (i = 0; i < max; i += 3) {
/* Split the packed table entry: device (upper bits) vs. register offset (low 12 bits). */
30 		dev = register_values[i] & ~0xfff;
31 		where = register_values[i] & 0xfff;
32 		reg = pci_read_config32(dev, where);
33 		reg &= register_values[i+1];
34 		reg |= register_values[i+2];
35 		pci_write_config32(dev, where, reg);
37 	// printk(BIOS_DEBUG, "done.\n");
/* controller_present():
 * Probe function 0 of the node for a valid device: config dword 0 is
 * (device_id << 16) | vendor_id, so 0x11001022 means vendor 0x1022 (AMD),
 * device 0x1100 (K8 HyperTransport technology configuration). Returns
 * non-zero if the memory-controller node answers. */
40 static int controller_present(const struct mem_controller *ctrl)
42 	return pci_read_config32(ctrl->f0, 0) == 0x11001022;
/* sdram_set_registers():
 * Program safe power-on defaults into this node's DRAM routing and
 * memory-controller registers using a (PCI address, AND mask, OR value)
 * triple table (same format as setup_resource_map). Limit registers are
 * deliberately written before base registers, since the base registers
 * carry the enable bits.
 * NOTE(review): the two prototypes below are the #if/#else arms of the
 * RAMINIT_SYSINFO build option; the #else/#endif lines are elided in this
 * excerpt. */
45 #if RAMINIT_SYSINFO==1
46 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
48 static void sdram_set_registers(const struct mem_controller *ctrl)
51 	static const unsigned int register_values[] = {
53 	/* Careful set limit registers before base registers which
54 	   contain the enables */
55 	/* DRAM Limit i Registers
64 	 * [ 2: 0] Destination Node ID
74 	 * [10: 8] Interleave select
75 	 *         specifies the values of A[14:12] to use with interleave enable.
77 	 * [31:16] DRAM Limit Address i Bits 39-24
78 	 *         This field defines the upper address bits of a 40 bit  address
79 	 *         that define the end of the DRAM region.
81 	PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
82 	PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
83 	PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
84 	PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
85 	PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
86 	PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
87 	PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
88 	PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
89 	/* DRAM Base i Registers
101 	 * [ 1: 1] Write Enable
102 	 *         0 = Writes Disabled
105 	 * [10: 8] Interleave Enable
106 	 *         000 = No interleave
107 	 *         001 = Interleave on A[12] (2 nodes)
109 	 *         011 = Interleave on A[12] and A[14] (4 nodes)
113 	 *         111 = Interleve on A[12] and A[13] and A[14] (8 nodes)
115 	 * [13:16] DRAM Base Address i Bits 39-24
116 	 *         This field defines the upper address bits of a 40-bit address
117 	 *         that define the start of the DRAM region.
119 	PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
120 	PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
121 	PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
122 	PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
123 	PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
124 	PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
125 	PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
126 	PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
128 	/* DRAM CS Base Address i Registers
137 	 * [ 0: 0] Chip-Select Bank Enable
141 	 * [15: 9] Base Address (19-13)
142 	 *         An optimization used when all DIMM are the same size...
144 	 * [31:21] Base Address (35-25)
145 	 *         This field defines the top 11 addresses bit of a 40-bit
146 	 *         address that define the memory address space. These
147 	 *         bits decode 32-MByte blocks of memory.
149 	PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
150 	PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
151 	PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
152 	PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
153 	PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
154 	PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
155 	PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
156 	PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
157 	/* DRAM CS Mask Address i Registers
166 	 * Select bits to exclude from comparison with the DRAM Base address register.
168 	 * [15: 9] Address Mask (19-13)
169 	 *         Address to be excluded from the optimized case
171 	 * [29:21] Address Mask (33-25)
172 	 *         The bits with an address mask of 1 are excluded from address comparison
176 	PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
177 	PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
178 	PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
179 	PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
180 	PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
181 	PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
182 	PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
183 	PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
184 	/* DRAM Bank Address Mapping Register
186 	 * Specify the memory module size
191 	 *         000 = 32Mbyte  (Rows = 12 & Col =  8)
192 	 *         001 = 64Mbyte  (Rows = 12 & Col =  9)
193 	 *         010 = 128Mbyte (Rows = 13 & Col =  9)|(Rows = 12 & Col = 10)
194 	 *         011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
195 	 *         100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
196 	 *         101 = 1Gbyte   (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
197 	 *         110 = 2Gbyte   (Rows = 14 & Col = 12)
204 	PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
205 	/* DRAM Timing Low Register
207 	 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
217 	 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
218 	 *         0000 = 7 bus clocks
219 	 *         0001 = 8 bus clocks
221 	 *         1110 = 21 bus clocks
222 	 *         1111 = 22 bus clocks
223 	 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
224 	 *         0000 = 9 bus clocks
225 	 *         0010 = 10 bus clocks
227 	 *         1110 = 23 bus clocks
228 	 *         1111 = 24 bus clocks
229 	 * [14:12] Trcd (Ras#-active to Case#-read/write Delay)
239 	 * [18:16] Trrd (Ras# to Ras# Delay)
249 	 * [23:20] Tras (Minmum Ras# Active Time)
250 	 *         0000 to 0100 = reserved
251 	 *         0101 = 5 bus clocks
253 	 *         1111 = 15 bus clocks
254 	 * [26:24] Trp (Row Precharge Time)
264 	 * [28:28] Twr (Write Recovery Time)
269 	PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
270 	/* DRAM Timing High Register
272 	 * [ 0: 0] Twtr (Write to Read Delay)
276 	 * [ 6: 4] Trwt (Read to Write Delay)
286 	 * [12: 8] Tref (Refresh Rate)
287 	 *         00000 = 100Mhz 4K rows
288 	 *         00001 = 133Mhz 4K rows
289 	 *         00010 = 166Mhz 4K rows
290 	 *         00011 = 200Mhz 4K rows
291 	 *         01000 = 100Mhz 8K/16K rows
292 	 *         01001 = 133Mhz 8K/16K rows
293 	 *         01010 = 166Mhz 8K/16K rows
294 	 *         01011 = 200Mhz 8K/16K rows
296 	 * [22:20] Twcl (Write CAS Latency)
297 	 *         000 = 1 Mem clock after CAS# (Unbuffered Dimms)
298 	 *         001 = 2 Mem clocks after CAS# (Registered Dimms)
301 	PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
302 	/* DRAM Config Low Register
304 	 * [ 0: 0] DLL Disable
313 	 * [ 3: 3] Disable DQS Hystersis  (FIXME handle this one carefully)
314 	 *         0 = Enable DQS input filter
315 	 *         1 = Disable DQS input filtering
318 	 *         0 = Initialization done or not yet started.
319 	 *         1 = Initiate DRAM intialization sequence
320 	 * [ 9: 9] SO-Dimm Enable
322 	 *         1 = SO-Dimms present
324 	 *         0 = DRAM not enabled
325 	 *         1 = DRAM initialized and enabled
326 	 * [11:11] Memory Clear Status
327 	 *         0 = Memory Clear function has not completed
328 	 *         1 = Memory Clear function has completed
329 	 * [12:12] Exit Self-Refresh
330 	 *         0 = Exit from self-refresh done or not yet started
331 	 *         1 = DRAM exiting from self refresh
332 	 * [13:13] Self-Refresh Status
333 	 *         0 = Normal Operation
334 	 *         1 = Self-refresh mode active
335 	 * [15:14] Read/Write Queue Bypass Count
340 	 * [16:16] 128-bit/64-Bit
341 	 *         0 = 64bit Interface to DRAM
342 	 *         1 = 128bit Interface to DRAM
343 	 * [17:17] DIMM ECC Enable
344 	 *         0 = Some DIMMs do not have ECC
345 	 *         1 = ALL DIMMS have ECC bits
346 	 * [18:18] UnBuffered DIMMs
348 	 *         1 = Unbuffered DIMMS
349 	 * [19:19] Enable 32-Byte Granularity
350 	 *         0 = Optimize for 64byte bursts
351 	 *         1 = Optimize for 32byte bursts
352 	 * [20:20] DIMM 0 is x4
353 	 * [21:21] DIMM 1 is x4
354 	 * [22:22] DIMM 2 is x4
355 	 * [23:23] DIMM 3 is x4
357 	 *         1 = x4 DIMM present
358 	 * [24:24] Disable DRAM Receivers
359 	 *         0 = Receivers enabled
360 	 *         1 = Receivers disabled
362 	 *         000 = Arbiters chois is always respected
363 	 *         001 = Oldest entry in DCQ can be bypassed 1 time
364 	 *         010 = Oldest entry in DCQ can be bypassed 2 times
365 	 *         011 = Oldest entry in DCQ can be bypassed 3 times
366 	 *         100 = Oldest entry in DCQ can be bypassed 4 times
367 	 *         101 = Oldest entry in DCQ can be bypassed 5 times
368 	 *         110 = Oldest entry in DCQ can be bypassed 6 times
369 	 *         111 = Oldest entry in DCQ can be bypassed 7 times
372 	PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
374 	(0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
375 	(1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
376 	(2 << 14)|(0 << 13)|(0 << 12)|
377 	(0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
378 	(0 << 3) |(0 << 1) |(0 << 0),
379 	/* DRAM Config High Register
381 	 * [ 0: 3] Maximum Asynchronous Latency
386 	 * [11: 8] Read Preamble
404 	 * [18:16] Idle Cycle Limit
413 	 * [19:19] Dynamic Idle Cycle Center Enable
414 	 *         0 = Use Idle Cycle Limit
415 	 *         1 = Generate a dynamic Idle cycle limit
416 	 * [22:20] DRAM MEMCLK Frequency
426 	 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
427 	 *         0 = Disable MemClks
429 	 * [26:26] Memory Clock 0 Enable
432 	 * [27:27] Memory Clock 1 Enable
435 	 * [28:28] Memory Clock 2 Enable
438 	 * [29:29] Memory Clock 3 Enable
443 	PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
444 	(0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
445 	(0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
446 	/* DRAM Delay Line Register
448 	 * Adjust the skew of the input DQS strobe relative to DATA
450 	 * [23:16] Delay Line Adjust
451 	 *         Adjusts the DLL derived PDL delay by one or more delay stages
452 	 *         in either the faster or slower direction.
453 	 * [24:24} Adjust Slower
455 	 *         1 = Adj is used to increase the PDL delay
456 	 * [25:25] Adjust Faster
458 	 *         1 = Adj is used to decrease the PDL delay
461 	PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
462 	/* MCA NB Status Low reg */
463 	PCI_ADDR(0, 0x18, 3, 0x48), 0x00f00000, 0x00000000,
464 	/* MCA NB Status high reg */
465 	PCI_ADDR(0, 0x18, 3, 0x4c), 0x01801e8c, 0x00000000,
466 	/* MCA NB address Low reg */
467 	PCI_ADDR(0, 0x18, 3, 0x50), 0x00000007, 0x00000000,
468 	/* MCA NB address high reg */
469 	PCI_ADDR(0, 0x18, 3, 0x54), 0xffffff00, 0x00000000,
470 	/* DRAM Scrub Control Register
472 	 * [ 4: 0] DRAM Scrube Rate
474 	 * [12: 8] L2 Scrub Rate
476 	 * [20:16] Dcache Scrub
479 	 *         00000 = Do not scrub
501 	 *         All Others = Reserved
503 	PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
504 	/* DRAM Scrub Address Low Register
506 	 * [ 0: 0] DRAM Scrubber Redirect Enable
508 	 *         1 = Scrubber Corrects errors found in normal operation
510 	 * [31: 6] DRAM Scrub Address 31-6
512 	PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
513 	/* DRAM Scrub Address High Register
515 	 * [ 7: 0] DRAM Scrubb Address 39-32
518 	PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
/* Bail out early if this node has no working memory controller. */
523 	if (!controller_present(ctrl)) {
524 		// printk(BIOS_DEBUG, "No memory controller present\n");
527 	printk(BIOS_SPEW, "setting up CPU%02x northbridge registers\n", ctrl->node_id);
528 	max = ARRAY_SIZE(register_values);
529 	for (i = 0; i < max; i += 3) {
/* Same RMW loop as setup_resource_map(), but the table addresses are
 * relative to node 0 (dev 0x18); rebase them onto this node's f0 device. */
533 		dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
534 		where = register_values[i] & 0xfff;
535 		reg = pci_read_config32(dev, where);
536 		reg &= register_values[i+1];
537 		reg |= register_values[i+2];
538 		pci_write_config32(dev, where, reg);
540 	printk(BIOS_SPEW, "done.\n");
/* hw_enable_ecc():
 * Enable DIMM ECC in DRAM Config Low only if the northbridge reports ECC
 * capability (NBCAP_ECC), then let the CMOS "ECC_memory" option veto it
 * (option value 0 = ECC disabled by the user; default 1 = enabled). */
543 static void hw_enable_ecc(const struct mem_controller *ctrl)
546 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
547 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
/* Start from "ECC off", turn it on only when the hardware supports it. */
548 	dcl &= ~DCL_DimmEccEn;
549 	if (nbcap & NBCAP_ECC) {
550 		dcl |= DCL_DimmEccEn;
552 	if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
553 		dcl &= ~DCL_DimmEccEn;
555 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* is_dual_channel():
 * Non-zero when the controller is configured for a 128-bit (dual-channel)
 * DRAM interface, per the DCL_128BitEn bit in DRAM Config Low. */
558 static int is_dual_channel(const struct mem_controller *ctrl)
561 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
562 	return dcl & DCL_128BitEn;
/* is_opteron():
 * Misnamed capability check (see original comment): actually returns
 * whether the northbridge is 128-bit (dual-channel) capable, which is
 * what all callers use it for. */
565 static int is_opteron(const struct mem_controller *ctrl)
567 	/* Test to see if I am an Opteron. Socket 939 based Athlon64
568 	 * have dual channel capability, too, so we need a better test
570 	 * However, all code uses is_opteron() to find out whether to
571 	 * use dual channel, so if we really check for opteron here, we
572 	 * need to fix up all code using this function, too.
575 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
576 	return !!(nbcap & NBCAP_128Bit);
/* is_registered():
 * Non-zero when the controller was configured for registered DIMMs
 * (DCL_UnBuffDimm clear). Only valid after spd_handle_unbuffered_dimms()
 * has programmed DRAM Config Low. */
579 static int is_registered(const struct mem_controller *ctrl)
581 	/* Test to see if we are dealing with registered SDRAM.
582 	 * If we are not registered we are unbuffered.
583 	 * This function must be called after spd_handle_unbuffered_dimms.
586 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
587 	return !(dcl & DCL_UnBuffDimm);
595 #if QRANK_DIMM_SUPPORT == 1
/* spd_get_dimm_size():
 * Read the SPD EEPROM at 'device' and compute the log2 size in BITS of
 * each side of the DIMM (side1/side2 of the returned struct dimm_size),
 * as rows + columns + log2(banks) + log2(data width). On an SPD read
 * failure the DIMM is reported as size 0; an implausible SPD value is
 * fatal (die).
 * NOTE(review): listing is elided — error paths, the 'out' label and the
 * return are only partially visible here. */
600 static struct dimm_size spd_get_dimm_size(unsigned device)
602 	/* Calculate the log base 2 size of a DIMM in bits */
609 #if QRANK_DIMM_SUPPORT == 1
613 	/* Note it might be easier to use byte 31 here, it has the DIMM size as
614 	 * a multiple of 4MB.  The way we do it now we can size both
615 	 * sides of an assymetric dimm.
617 	value = spd_read_byte(device, 3);	/* rows */
618 	if (value < 0) goto hw_err;
619 	if ((value & 0xf) == 0) goto val_err;
620 	sz.side1 += value & 0xf;
621 	sz.rows = value & 0xf;
623 	value = spd_read_byte(device, 4);	/* columns */
624 	if (value < 0) goto hw_err;
625 	if ((value & 0xf) == 0) goto val_err;
626 	sz.side1 += value & 0xf;
627 	sz.col = value & 0xf;
629 	value = spd_read_byte(device, 17);	/* banks */
630 	if (value < 0) goto hw_err;
631 	if ((value & 0xff) == 0) goto val_err;
632 	sz.side1 += log2(value & 0xff);
634 	/* Get the module data width and convert it to a power of two */
635 	value = spd_read_byte(device, 7);	/* (high byte) */
636 	if (value < 0) goto hw_err;
640 	low = spd_read_byte(device, 6);	/* (low byte) */
641 	if (low < 0) goto hw_err;
642 	value = value | (low & 0xff);
/* Only 64-bit (non-ECC) and 72-bit (ECC) modules are acceptable. */
643 	if ((value != 72) && (value != 64)) goto val_err;
644 	sz.side1 += log2(value);
647 	value = spd_read_byte(device, 5);	/* number of physical banks */
648 	if (value < 0) goto hw_err;
649 	if (value == 1) goto out;
650 	if ((value != 2) && (value != 4 )) {
653 #if QRANK_DIMM_SUPPORT == 1
657 	/* Start with the symmetrical case */
/* SPD bytes 3/4 pack side2 geometry in the high nibble; adjust side2
 * by removing side1's rows/columns and adding side2's. */
660 	value = spd_read_byte(device, 3);	/* rows */
661 	if (value < 0) goto hw_err;
662 	if ((value & 0xf0) == 0) goto out;	/* If symmetrical we are done */
663 	sz.side2 -= (value & 0x0f);		/* Subtract out rows on side 1 */
664 	sz.side2 += ((value >> 4) & 0x0f);	/* Add in rows on side 2 */
666 	value = spd_read_byte(device, 4);	/* columns */
667 	if (value < 0) goto hw_err;
668 	if ((value & 0xff) == 0) goto val_err;
669 	sz.side2 -= (value & 0x0f);	/* Subtract out columns on side 1 */
670 	sz.side2 += ((value >> 4) & 0x0f);	/* Add in columsn on side 2 */
675 	die("Bad SPD value\n");
676 	/* If an hw_error occurs report that I have no memory */
682 #if QRANK_DIMM_SUPPORT == 1
/* set_dimm_size():
 * Convert the log2 bit sizes from spd_get_dimm_size() into DRAM CS Base
 * register values (size in 32 MB units in bits [31:21], enable in bit 0),
 * doubling for dual-channel, and program the pair of chip-select base
 * registers for DIMM 'index'. Also enables this DIMM's MEMCLK output.
 * (25 + 3) = log2(32 MB in bits): 2^25 bytes * 8 bits/byte. */
690 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
692 	uint32_t base0, base1;
695 	if (sz.side1 != sz.side2) {
699 	/* For each base register.
700 	 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
701 	 * The initialize dimm size is in bits.
702 	 * Set the base enable bit0.
707 	/* Make certain side1 of the dimm is at least 32MB */
708 	if (sz.side1 >= (25 +3)) {
709 		base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
712 	/* Make certain side2 of the dimm is at least 32MB */
713 	if (sz.side2 >= (25 + 3)) {
714 		base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
717 	/* Double the size if we are using dual channel memory */
718 	if (is_dual_channel(ctrl)) {
/* Shift the size field left one (x2) but keep the enable bit. */
719 		base0 = (base0 << 1) | (base0 & 1);
720 		base1 = (base1 << 1) | (base1 & 1);
723 	/* Clear the reserved bits */
724 	base0 &= ~0x001ffffe;
725 	base1 &= ~0x001ffffe;
727 	/* Set the appropriate DIMM base address register */
728 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
729 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
730 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank: mirror the bases into chip selects 4/5 for the upper ranks. */
732 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
733 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
737 	/* Enable the memory clocks for this DIMM */
739 		dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
740 		dch |= DCH_MEMCLK_EN0 << index;
741 #if QRANK_DIMM_SUPPORT == 1
743 			dch |= DCH_MEMCLK_EN0 << (index + 2);
746 		pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* set_dimm_map():
 * Program this DIMM's 4-bit field in the DRAM Bank Address Mapping
 * register. Pre-D0 CPUs encode (size - 32MB_log2) directly; D0 and later
 * use the cs_map_aa lookup table indexed by (rows, columns) geometry.
 * With quad-rank support the field is duplicated for chip selects 4-7. */
750 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
752 	static const unsigned cs_map_aa[] = {
753 		/* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
761 	map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
/* Clear this DIMM's nibble before or-ing in the new encoding. */
762 	map &= ~(0xf << (index * 4));
763 #if QRANK_DIMM_SUPPORT == 1
765 		map &= ~(0xf << ( (index + 2) * 4));
770 	/* Make certain side1 of the dimm is at least 32MB */
771 	if (sz.side1 >= (25 +3)) {
772 		if (is_cpu_pre_d0()) {
773 			map |= (sz.side1 - (25 + 3)) << (index *4);
774 #if QRANK_DIMM_SUPPORT == 1
776 				map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
/* D0+: table rows are indexed by (rows - 12), columns by (col - 8),
 * 5 column entries per row. */
781 			map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
782 #if QRANK_DIMM_SUPPORT == 1
784 				map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
790 	pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/* spd_set_ram_size():
 * For each populated DIMM socket (per dimm_mask), size the module from
 * SPD and program its chip-select base and bank-address-map fields.
 * Returns the (possibly updated) dimm_mask, or -1 on an SPD error. */
794 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
798 	for (i = 0; i < DIMM_SOCKETS; i++) {
800 		if (!(dimm_mask & (1 << i))) {
803 		sz = spd_get_dimm_size(ctrl->channel0[i]);
805 			return -1; /* Report SPD error */
807 		set_dimm_size(ctrl, sz, i);
808 		set_dimm_map (ctrl, sz, i);
/* route_dram_accesses():
 * Publish this node's DRAM window [base_k, limit_k) to every node's
 * address map: write the DRAM Base/Limit pair (index = node_id * 8 bytes)
 * in function 1 of each node so HyperTransport routing reaches this
 * controller. Base gets read (bit 0) and write (bit 1) enables; limit
 * carries the destination node id. Addresses are in KB, hence << 2 to
 * get the [39:24]-style register granularity. */
813 static void route_dram_accesses(const struct mem_controller *ctrl,
814 				unsigned long base_k, unsigned long limit_k)
816 	/* Route the addresses to the controller node */
821 	unsigned limit_reg, base_reg;
824 	node_id = ctrl->node_id;
825 	index = (node_id << 3);
826 	limit = (limit_k << 2);
829 	limit |= ( 0 << 8) | (node_id << 0);
830 	base = (base_k << 2);
832 	base |= (0 << 8) | (1<<1) | (1<<0);
834 	limit_reg = 0x44 + index;
835 	base_reg = 0x40 + index;
/* Write the same base/limit pair into function 1 of every node (0x18..0x1f). */
836 	for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
837 		pci_write_config32(device, limit_reg, limit);
838 		pci_write_config32(device, base_reg, base);
/* set_top_mem():
 * Program the TOP_MEM / TOP_MEM2 MSRs from the total memory size (tom_k,
 * in KB). Memory above 4 GB goes in TOP_MEM2; the below-4GB limit is then
 * clamped so a hole remains under 4 GB for the ROM and MMIO devices.
 * hole_startk optionally places the hardware memory hole.
 * NOTE(review): listing is elided — the die() on zero memory, the else
 * arms and the final TOP_MEM wrmsr are not all visible here. */
842 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
844 	/* Error if I don't have memory */
849 	/* Report the amount of memory. */
850 	printk(BIOS_DEBUG, "RAM end at 0x%08x kB\n", tom_k);
852 	/* Now set top of memory */
854 	if (tom_k > (4*1024*1024)) {
855 		printk(BIOS_SPEW, "Handling memory mapped above 4 GB\n");
856 		printk(BIOS_SPEW, "Upper RAM end at 0x%08x kB\n", tom_k);
/* MSR value is a byte address: KB << 10 for the low word, the upper
 * address bits [39:32] land in the high word. */
857 		msr.lo = (tom_k & 0x003fffff) << 10;
858 		msr.hi = (tom_k & 0xffc00000) >> 22;
859 		wrmsr(TOP_MEM2, msr);
860 		printk(BIOS_SPEW, "Correcting memory amount mapped below 4 GB\n");
863 	/* Leave a 64M hole between TOP_MEM and TOP_MEM2
864 	 * so I can see my rom chip and other I/O devices.
866 	if (tom_k >= 0x003f0000) {
867 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
868 		if (hole_startk != 0) {
873 		printk(BIOS_SPEW, "Adjusting lower RAM end\n");
875 	printk(BIOS_SPEW, "Lower RAM end at 0x%08x kB\n", tom_k);
876 	msr.lo = (tom_k & 0x003fffff) << 10;
877 	msr.hi = (tom_k & 0xffc00000) >> 22;
/* interleave_chip_selects():
 * If all enabled chip selects have identical size and cs_mode, interleave
 * them: the CS base registers get incrementing low-order bits (csbase_inc
 * apart, chosen per cs_mode and CPU revision) while the mask excludes the
 * interleave bits from comparison. Requires a power-of-two count of chip
 * selects (2, 4 or 8). Returns total memory in KB, or (per the visible
 * early-exit conditions) bails out when interleaving is impossible.
 * NOTE(review): listing is elided — the 'return 0' style failure paths
 * between the visible checks are not shown here. */
881 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
884 	static const uint8_t csbase_low_shift[] = {
887 	/* 128MB */ (14 - 4),
888 	/* 256MB */ (15 - 4),
889 	/* 512MB */ (15 - 4),
894 	static const uint8_t csbase_low_d0_shift[] = {
897 	/* 128MB */ (14 - 4),
898 	/* 128MB */ (15 - 4),
899 	/* 256MB */ (15 - 4),
900 	/* 512MB */ (15 - 4),
901 	/* 256MB */ (16 - 4),
902 	/* 512MB */ (16 - 4),
908 	/* cs_base_high is not changed */
911 	int chip_selects, index;
913 	unsigned common_size;
914 	unsigned common_cs_mode;
915 	uint32_t csbase, csmask;
917 	/* See if all of the memory chip selects are the same size
918 	 * and if so count them.
923 	for (index = 0; index < 8; index++) {
928 		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
936 		if (common_size == 0) {
939 		/* The size differed fail */
940 		if (common_size != size) {
944 		value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
/* Each pair of chip selects shares one 4-bit cs_mode nibble. */
945 		cs_mode =( value >> ((index>>1)*4)) & 0xf;
946 		if (cs_mode == 0 ) continue;
947 		if (common_cs_mode == 0) {
948 			common_cs_mode = cs_mode;
950 		/* The cs_mode differed fail */
951 		if (common_cs_mode != cs_mode) {
956 	/* Chip selects can only be interleaved when there is
957 	 * more than one and their is a power of two of them.
959 	bits = log2(chip_selects);
960 	if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
964 	/* Find the bits of csbase that we need to interleave on */
965 	if (is_cpu_pre_d0()){
966 		csbase_inc = 1 << csbase_low_shift[common_cs_mode];
967 		if (is_dual_channel(ctrl)) {
968 			/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
969 			if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
970 				// printk(BIOS_DEBUG, "8 4GB chip selects cannot be interleaved\n");
977 		csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
978 		if (is_dual_channel(ctrl)) {
979 			if ( (bits==3) && (common_cs_mode > 8)) {
980 				// printk(BIOS_DEBUG, "8 cs_mode>8 chip selects cannot be interleaved\n");
987 	/* Compute the initial values for csbase and csbask.
988 	 * In csbase just set the enable bit and the base to zero.
989 	 * In csmask set the mask bits for the size and page level interleave.
992 	csmask = (((common_size << bits) - 1) << 21);
993 	csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
994 	for (index = 0; index < 8; index++) {
997 		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1002 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1003 		pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
/* Next enabled chip select starts one interleave step higher. */
1004 		csbase += csbase_inc;
1007 	printk(BIOS_SPEW, "Interleaved\n");
1009 	/* Return the memory size in K */
1010 	return common_size << (15 + bits);
/* order_chip_selects():
 * Fallback when interleaving is not possible: repeatedly pick the largest
 * remaining enabled chip select and stack it at the current top of memory,
 * rewriting its CS base (base = tom, enable bit set) and a simple size
 * mask. The high byte of 'tom' doubles as a bitmap of already-placed
 * chip selects. Returns total memory in KB. */
1013 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1017 	/* Remember which registers we have used in the high 8 bits of tom */
1020 		/* Find the largest remaining candidate */
1021 		unsigned index, candidate;
1022 		uint32_t csbase, csmask;
1026 		for (index = 0; index < 8; index++) {
1028 			value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1030 			/* Is it enabled? */
1035 			/* Is it greater? */
1036 			if (value <= csbase) {
1040 			/* Has it already been selected */
1041 			if (tom & (1 << (index + 24))) {
1044 			/* I have a new candidate */
1049 		/* See if I have found a new candidate */
1054 		/* Remember the dimm size */
/* CS base holds the size in 32 MB units in bits [31:21]. */
1055 		size = csbase >> 21;
1057 		/* Remember I have used this register */
1058 		tom |= (1 << (candidate + 24));
1060 		/* Recompute the cs base register value */
1061 		csbase = (tom << 21) | 1;
1063 		/* Increment the top of memory */
1066 		/* Compute the memory mask */
1067 		csmask = ((size -1) << 21);
1068 		csmask |= 0xfe00;		/* For now don't optimize */
1070 		/* Write the new base register */
1071 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (candidate << 2), csbase);
1072 		/* Write the new mask register */
1073 		pci_write_config32(ctrl->f2, DRAM_CSMASK + (candidate << 2), csmask);
1076 	/* Return the memory size in K */
/* Strip the used-register bitmap from the high byte before converting
 * 32 MB units to KB (<< 15). */
1077 	return (tom & ~0xff000000) << 15;
/* memory_end_k():
 * Scan the DRAM Base/Limit pairs of nodes [0, max_node_id) in function 1
 * and return the highest DRAM limit found, in KB. Only pairs whose base
 * has both read and write enables set (bits 1:0 == 3) are considered. */
1080 static unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1084 	/* Find the last memory address used */
1086 	for (node_id = 0; node_id < max_node_id; node_id++) {
1087 		uint32_t limit, base;
1089 		index = node_id << 3;
1090 		base = pci_read_config32(ctrl->f1, 0x40 + index);
1091 		/* Only look at the limit if the base is enabled */
1092 		if ((base & 3) == 3) {
1093 			limit = pci_read_config32(ctrl->f1, 0x44 + index);
/* Limit register holds bits [39:24]-style; round up one 16 MB unit
 * (the limit is inclusive) and convert to KB (>> 2). */
1094 			end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
/* order_dimms():
 * Lay out this node's chip selects — interleaved if the CMOS option allows
 * it and interleave_chip_selects() succeeds, otherwise stacked via
 * order_chip_selects() — then place the node's DRAM window after all
 * lower-numbered nodes and program routing and top-of-memory. */
1100 static void order_dimms(const struct mem_controller *ctrl)
1102 	unsigned long tom_k, base_k;
1104 	if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1105 		tom_k = interleave_chip_selects(ctrl);
1107 		printk(BIOS_DEBUG, "Interleaving disabled\n");
1112 		tom_k = order_chip_selects(ctrl);
1115 	/* Compute the memory base address */
/* This node starts where the previous nodes' memory ends. */
1116 	base_k = memory_end_k(ctrl, ctrl->node_id);
1118 	route_dram_accesses(ctrl, base_k, tom_k);
1119 	set_top_mem(tom_k, 0);
/* disable_dimm():
 * Disable DIMM 'index' by zeroing both of its chip-select base registers
 * (clearing the enable bit) and removing it from dimm_mask. Returns the
 * updated mask. */
1122 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1124 	printk(BIOS_DEBUG, "disabling dimm %02x\n", index);
1125 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1126 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1127 	dimm_mask &= ~(1 << index);
/* spd_handle_unbuffered_dimms():
 * Classify populated DIMMs as registered or unbuffered via SPD byte 21
 * bit 1; mixing the two kinds is fatal. Program DCL_UnBuffDimm (and, on
 * D0+/socket-939 parts per the cpuid check, DCL_UpperCSMap) accordingly.
 * Returns dimm_mask.
 * NOTE(review): listing is elided — the unbuffered/registered counters
 * and parts of the branch structure are not visible here. */
1131 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
1137 	int has_dualch = is_opteron(ctrl);
1141 	for (i = 0; (i < DIMM_SOCKETS); i++) {
1143 		if (!(dimm_mask & (1 << i))) {
1146 		value = spd_read_byte(ctrl->channel0[i], 21);
1151 		/* Registered dimm ? */
1152 		if (value & (1 << 1)) {
1155 		/* Otherwise it must be an unbuffered dimm */
1160 	if (unbuffered && registered) {
1161 		die("Mixed buffered and registered dimms not supported");
1164 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1165 	dcl &= ~DCL_UnBuffDimm;
1167 		if ((has_dualch) && (!is_cpu_pre_d0())) {
1168 			dcl |= DCL_UnBuffDimm; /* set DCL_DualDIMMen too? */
1170 			/* set DCL_En2T if you have non-equal DDR mem types! */
/* cpuid_eax(1) model bits: 0x30 identifies the socket-939 parts that
 * mirror CS[3:0] onto CS[7:4]. */
1172 			if ((cpuid_eax(1) & 0x30) == 0x30) {
1173 				/* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
1174 				dcl |= DCL_UpperCSMap;
1177 			dcl |= DCL_UnBuffDimm;
1180 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1182 	if (is_registered(ctrl)) {
1183 		printk(BIOS_SPEW, "Registered\n");
1185 		printk(BIOS_SPEW, "Unbuffered\n");
/* spd_detect_dimms():
 * Probe SPD byte 2 (memory type) on both channels of every socket and
 * build a presence bitmask: bit i for channel 0 socket i, bit
 * (i + DIMM_SOCKETS) for the channel 1 partner. Returns the mask. */
1191 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1196 	for (i = 0; i < DIMM_SOCKETS; i++) {
1199 		device = ctrl->channel0[i];
1201 			byte = spd_read_byte(ctrl->channel0[i], 2);  /* Type */
1203 				dimm_mask |= (1 << i);
1206 		device = ctrl->channel1[i];
1208 			byte = spd_read_byte(ctrl->channel1[i], 2);
1210 				dimm_mask |= (1 << (i + DIMM_SOCKETS));
/* spd_enable_2channels():
 * Enable 128-bit dual-channel mode when every populated channel-0 DIMM has
 * an identical partner on channel 1: the populated sockets must mirror
 * across channels, the northbridge must be 128-bit capable, and all
 * timing-relevant SPD bytes (the 'addresses' list) must match pairwise.
 * On success sets DCL_128BitEn, clears DCL_32ByteEn, and drops the
 * channel-1 bits from dimm_mask. Otherwise falls through to the
 * single_channel path (elided in this listing). */
1217 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
1221 	/* SPD addresses to verify are identical */
1222 	static const uint8_t addresses[] = {
1223 		2,	/* Type should be DDR SDRAM */
1224 		3,	/* *Row addresses */
1225 		4,	/* *Column addresses */
1226 		5,	/* *Physical Banks */
1227 		6,	/* *Module Data Width low */
1228 		7,	/* *Module Data Width high */
1229 		9,	/* *Cycle time at highest CAS Latency CL=X */
1230 		11,	/* *SDRAM Type */
1231 		13,	/* *SDRAM Width */
1232 		17,	/* *Logical Banks */
1233 		18,	/* *Supported CAS Latencies */
1234 		21,	/* *SDRAM Module Attributes */
1235 		23,	/* *Cycle time at CAS Latnecy (CLX - 0.5) */
1236 		26,	/* *Cycle time at CAS Latnecy (CLX - 1.0) */
1237 		27,	/* *tRP Row precharge time */
1238 		28,	/* *Minimum Row Active to Row Active Delay (tRRD) */
1239 		29,	/* *tRCD RAS to CAS */
1240 		30,	/* *tRAS Activate to Precharge */
1241 		41,	/* *Minimum Active to Active/Auto Refresh Time(Trc) */
1242 		42,	/* *Minimum Auto Refresh Command Time(Trfc) */
1244 	/* If the dimms are not in pairs do not do dual channels */
1245 	if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1246 		((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1247 		goto single_channel;
1249 	/* If the cpu is not capable of doing dual channels don't do dual channels */
1250 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1251 	if (!(nbcap & NBCAP_128Bit)) {
1252 		goto single_channel;
1254 	for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1255 		unsigned device0, device1;
1258 		/* If I don't have a dimm skip this one */
1259 		if (!(dimm_mask & (1 << i))) {
1262 		device0 = ctrl->channel0[i];
1263 		device1 = ctrl->channel1[i];
/* Compare every listed SPD byte of the pair; any mismatch forbids
 * ganging the channels. */
1264 		for (j = 0; j < ARRAY_SIZE(addresses); j++) {
1266 			addr = addresses[j];
1267 			value0 = spd_read_byte(device0, addr);
1271 			value1 = spd_read_byte(device1, addr);
1275 			if (value0 != value1) {
1276 				goto single_channel;
1280 	printk(BIOS_SPEW, "Enabling dual channel memory\n");
1282 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1283 	dcl &= ~DCL_32ByteEn;
1284 	dcl |= DCL_128BitEn;
1285 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Channel-1 DIMMs are now implied by their channel-0 partners. */
1288 	dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
/* Tail of struct mem_param (opening lines elided in this listing):
 * per-memory-speed parameter bundle consumed by get_mem_param() and the
 * timing-programming code. */
1294 	uint8_t divisor; /* In 1/2 ns increments */
1297 	uint32_t dch_memclk;
1298 	uint16_t dch_tref4k, dch_tref8k;
/* Trwt table: first index CAS latency (2/2.5/3), second index bus
 * configuration, per the original comment. */
1301 	uint8_t dtl_trwt[3][3]; /* first index is CAS_LAT 2/2.5/3 and 128/registered64/64 */
1302 	uint8_t rdpreamble[4]; /* 0 is for registered, 1 for 1-2 DIMMS, 2 and 3 for 3 or 4 unreg dimm slots */
/* get_mem_param():
 * Map a minimum SPD cycle time (in the 0xXY fixed-point ns encoding used
 * by SPD, e.g. 0x50 = 5.0 ns) to the matching mem_param entry from the
 * speed table (100/133/166/200 MHz). Entries are ordered fastest-last;
 * the scan stops at the slowest entry whose successor would be too fast.
 * Dies if min_cycle_time is below the fastest supported speed.
 * NOTE(review): listing is elided — the sentinel entry and the return
 * statement are not visible here. */
1306 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1308 	static const struct mem_param speed[] = {
1312 		.divisor	= (10 <<1),
1315 		.dch_memclk	= DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1316 		.dch_tref4k	= DTH_TREF_100MHZ_4K,
1317 		.dch_tref8k	= DTH_TREF_100MHZ_8K,
1320 		.dtl_trwt	= { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1321 		.rdpreamble	= { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
1326 		.divisor	= (7<<1)+1,
1329 		.dch_memclk	= DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1330 		.dch_tref4k	= DTH_TREF_133MHZ_4K,
1331 		.dch_tref8k	= DTH_TREF_133MHZ_8K,
1334 		.dtl_trwt	= { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1335 		.rdpreamble	= { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
1343 		.dch_memclk	= DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1344 		.dch_tref4k	= DTH_TREF_166MHZ_4K,
1345 		.dch_tref8k	= DTH_TREF_166MHZ_8K,
1348 		.dtl_trwt	= { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
1349 		.rdpreamble	= { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
1357 		.dch_memclk	= DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1358 		.dch_tref4k	= DTH_TREF_200MHZ_4K,
1359 		.dch_tref8k	= DTH_TREF_200MHZ_8K,
1362 		.dtl_trwt	= { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1363 		.rdpreamble	= { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
1369 	const struct mem_param *param;
1370 	for (param = &speed[0]; param->cycle_time ; param++) {
1371 		if (min_cycle_time > (param+1)->cycle_time) {
1375 	if (!param->cycle_time) {
1376 		die("min_cycle_time to low");
1378 	printk(BIOS_SPEW, "%s\n", param->name);
/* Result bundle for spd_set_memclk(): the chosen speed parameters plus
 * (per the elided members) the possibly-reduced dimm_mask. */
1382 struct spd_set_memclk_result {
1383 	const struct mem_param *param;
/* spd_set_memclk():
 * Pick the fastest memory clock and lowest CAS latency supported by both
 * the northbridge (NORTHBRIDGE_CAP memclk field), the CMOS max_mem_clock
 * option, and every populated DIMM's SPD (bytes 18/9/23/26). A second
 * pass disables any DIMM that cannot run the chosen clock/latency, and
 * (with QRANK_DIMM_SUPPORT) a fully loaded 4-socket configuration is
 * down-clocked. Cycle times use the SPD 0xXY fixed-point ns encoding
 * (0x50 = 5.0 ns).
 * NOTE(review): this function continues past the end of this listing;
 * only the visible lines are annotated. */
1386 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1388 	/* Compute the minimum cycle time for these dimms */
1389 	struct spd_set_memclk_result result;
1390 	unsigned min_cycle_time, min_latency, bios_cycle_time;
1394 	static const uint8_t latency_indicies[] = { 26, 23, 9 };
1395 	static const unsigned char min_cycle_times[] = {
1396 		[NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1397 		[NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1398 		[NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1399 		[NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
1402 	value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1404 	min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1405 	bios_cycle_time = min_cycle_times[
1406 		read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
/* The CMOS option may only slow the clock down, never exceed hardware. */
1407 	if (bios_cycle_time > min_cycle_time) {
1408 		min_cycle_time = bios_cycle_time;
1412 	/* Compute the least latency with the fastest clock supported
1413 	 * by both the memory controller and the dimms.
1415 	for (i = 0; i < DIMM_SOCKETS; i++) {
1416 		int new_cycle_time, new_latency;
1421 		if (!(dimm_mask & (1 << i))) {
1425 		/* First find the supported CAS latencies
1426 		 * Byte 18 for DDR SDRAM is interpreted:
1427 		 * bit 0 == CAS Latency = 1.0
1428 		 * bit 1 == CAS Latency = 1.5
1429 		 * bit 2 == CAS Latency = 2.0
1430 		 * bit 3 == CAS Latency = 2.5
1431 		 * bit 4 == CAS Latency = 3.0
1432 		 * bit 5 == CAS Latency = 3.5
1436 		new_cycle_time = 0xa0;
1439 		latencies = spd_read_byte(ctrl->channel0[i], 18);
1440 		if (latencies <= 0) continue;
1442 		/* Compute the lowest cas latency supported */
1443 		latency = log2(latencies) -2;
1445 		/* Loop through and find a fast clock with a low latency */
1446 		for (index = 0; index < 3; index++, latency++) {
/* Only CAS 2, 2.5 and 3 are usable by this controller. */
1448 			if ((latency < 2) || (latency > 4) ||
1449 				(!(latencies & (1 << latency)))) {
1452 			spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1453 			if (spd_value < 0) {
1457 			/* Only increase the latency if we decreas the clock */
1458 			if ((spd_value >= min_cycle_time) && (spd_value < new_cycle_time)) {
1459 				new_cycle_time = spd_value;
1460 				new_latency = latency;
1463 		if (new_latency > 4){
1466 		/* Does min_latency need to be increased? */
1467 		if (new_cycle_time > min_cycle_time) {
1468 			min_cycle_time = new_cycle_time;
1470 		/* Does min_cycle_time need to be increased? */
1471 		if (new_latency > min_latency) {
1472 			min_latency = new_latency;
1475 	/* Make a second pass through the dimms and disable
1476 	 * any that cannot support the selected memclk and cas latency.
1479 	for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1484 		if (!(dimm_mask & (1 << i))) {
1488 		latencies = spd_read_byte(ctrl->channel0[i], 18);
1489 		if (latencies < 0) goto hw_error;
1490 		if (latencies == 0) {
1494 		/* Compute the lowest cas latency supported */
1495 		latency = log2(latencies) -2;
1497 		/* Walk through searching for the selected latency */
1498 		for (index = 0; index < 3; index++, latency++) {
1499 			if (!(latencies & (1 << latency))) {
1502 			if (latency == min_latency)
1505 		/* If I can't find the latency or my index is bad error */
1506 		if ((latency != min_latency) || (index >= 3)) {
1510 		/* Read the min_cycle_time for this latency */
1511 		spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1512 		if (spd_value < 0) goto hw_error;
1514 		/* All is good if the selected clock speed
1515 		 * is what I need or slower.
1517 		if (spd_value <= min_cycle_time) {
1520 		/* Otherwise I have an error, disable the dimm */
1522 		dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1525 	//down speed for full load 4 rank support
1526 #if QRANK_DIMM_SUPPORT
/* All four sockets populated on both channels: check for dual-rank
 * modules and cap the clock at 166 MHz (0x60) if we were at 200 MHz. */
1527 	if (dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1529 		for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1531 			if (!(dimm_mask & (1 << i))) {
1534 			val = spd_read_byte(ctrl->channel0[i], 5);
1541 		if (min_cycle_time <= 0x50 ) {
1542 			min_cycle_time = 0x60;
1549 	/* Now that I know the minimum cycle time lookup the memory parameters */
1550 	result.param = get_mem_param(min_cycle_time);
1552 	/* Update DRAM Config High with our selected memory speed */
1553 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1554 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1556 /* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
1557 if (!is_cpu_pre_e0()) {
1558 if (min_cycle_time==0x50) {
1564 value |= result.param->dch_memclk;
1565 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1567 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1569 /* Update DRAM Timing Low with our selected cas latency */
1570 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1571 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1572 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1573 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1575 result.dimm_mask = dimm_mask;
1578 result.param = (const struct mem_param *)0;
1579 result.dimm_mask = -1;
/* Program Trc (ACTIVE-to-ACTIVE/auto-refresh period, SPD byte 41, in ns)
 * for DIMM i into DRAM Timing Low, rounded up to whole MEMCLKs and
 * clamped to the controller's min/max; never lowers a larger value a
 * previous DIMM already required.  Returns -1 on SPD read error.
 */
1584 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1586 unsigned clocks, old_clocks;
1589 value = spd_read_byte(ctrl->channel0[i], 41);
1590 if (value < 0) return -1;
/* 0 and 0xff mean "undefined" in SPD; a default is substituted here
 * (substitution lines not visible in this view). */
1591 if ((value == 0) || (value == 0xff)) {
/* Round value (ns) up to MEMCLK ticks; divisor presumably encodes the
 * cycle time in half-ns units -- TODO confirm against get_mem_param. */
1594 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1595 if (clocks < DTL_TRC_MIN) {
1596 clocks = DTL_TRC_MIN;
1598 if (clocks > DTL_TRC_MAX) {
/* Read-modify-write: keep the stricter of old and new constraint. */
1602 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1603 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1604 if (old_clocks > clocks) {
1605 clocks = old_clocks;
1607 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1608 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1609 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program Trfc (auto-refresh to ACTIVE/auto-refresh period, SPD byte 42,
 * in ns) for DIMM i into DRAM Timing Low, rounded up to MEMCLKs and
 * clamped; never lowers a larger already-programmed value.
 * Returns -1 on SPD read error.
 */
1613 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1615 unsigned clocks, old_clocks;
1618 value = spd_read_byte(ctrl->channel0[i], 42);
1619 if (value < 0) return -1;
/* 0 / 0xff mean "undefined" in SPD; fall back to the param table. */
1620 if ((value == 0) || (value == 0xff)) {
1621 value = param->tRFC;
/* Round value (ns) up to whole MEMCLK ticks. */
1623 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1624 if (clocks < DTL_TRFC_MIN) {
1625 clocks = DTL_TRFC_MIN;
1627 if (clocks > DTL_TRFC_MAX) {
/* Keep the stricter of the old and new constraint. */
1630 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1631 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
1632 if (old_clocks > clocks) {
1633 clocks = old_clocks;
1635 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1636 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1637 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program Trcd (RAS-to-CAS delay, SPD byte 29, in quarter-ns units) for
 * DIMM i into DRAM Timing Low, rounded up to MEMCLKs and clamped; never
 * lowers a larger already-programmed value.  Returns -1 on SPD error.
 */
1642 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1644 unsigned clocks, old_clocks;
1647 value = spd_read_byte(ctrl->channel0[i], 29);
1648 if (value < 0) return -1;
/* Byte 29 is in 0.25ns units, hence the doubled divisor. */
1649 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1650 if (clocks < DTL_TRCD_MIN) {
1651 clocks = DTL_TRCD_MIN;
1653 if (clocks > DTL_TRCD_MAX) {
/* Keep the stricter of the old and new constraint. */
1656 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1657 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
1658 if (old_clocks > clocks) {
1659 clocks = old_clocks;
1661 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1662 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1663 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program Trrd (ACTIVE-to-ACTIVE delay for different banks, SPD byte 28,
 * in quarter-ns units) for DIMM i into DRAM Timing Low, rounded up and
 * clamped; never lowers a larger already-programmed value.
 * Returns -1 on SPD read error.
 */
1667 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1669 unsigned clocks, old_clocks;
1672 value = spd_read_byte(ctrl->channel0[i], 28);
1673 if (value < 0) return -1;
/* Byte 28 is in 0.25ns units, hence the doubled divisor. */
1674 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1675 if (clocks < DTL_TRRD_MIN) {
1676 clocks = DTL_TRRD_MIN;
1678 if (clocks > DTL_TRRD_MAX) {
/* Keep the stricter of the old and new constraint. */
1681 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1682 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
1683 if (old_clocks > clocks) {
1684 clocks = old_clocks;
1686 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1687 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1688 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program Tras (minimum ACTIVE-to-PRECHARGE time, SPD byte 30, in ns)
 * for DIMM i into DRAM Timing Low, rounded up to MEMCLKs and clamped;
 * never lowers a larger already-programmed value.
 * Returns -1 on SPD read error.
 */
1692 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1694 unsigned clocks, old_clocks;
1697 value = spd_read_byte(ctrl->channel0[i], 30);
1698 if (value < 0) return -1;
/* Round value (ns) up to whole MEMCLK ticks. */
1699 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1700 if (clocks < DTL_TRAS_MIN) {
1701 clocks = DTL_TRAS_MIN;
1703 if (clocks > DTL_TRAS_MAX) {
/* Keep the stricter of the old and new constraint. */
1706 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1707 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1708 if (old_clocks > clocks) {
1709 clocks = old_clocks;
1711 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1712 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1713 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program Trp (row precharge time, SPD byte 27, in quarter-ns units)
 * for DIMM i into DRAM Timing Low, rounded up and clamped; never lowers
 * a larger already-programmed value.  Returns -1 on SPD read error.
 */
1717 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1719 unsigned clocks, old_clocks;
1722 value = spd_read_byte(ctrl->channel0[i], 27);
1723 if (value < 0) return -1;
/* Byte 27 is in 0.25ns units, hence the doubled divisor. */
1724 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1725 if (clocks < DTL_TRP_MIN) {
1726 clocks = DTL_TRP_MIN;
1728 if (clocks > DTL_TRP_MAX) {
/* Keep the stricter of the old and new constraint. */
1731 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1732 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
1733 if (old_clocks > clocks) {
1734 clocks = old_clocks;
1736 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1737 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1738 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program Twr (write recovery time) into DRAM Timing Low straight from
 * the precomputed param table; not read from SPD.
 */
1742 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1745 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1746 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1747 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1748 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Initialize the refresh interval (Tref) in DRAM Timing High to the
 * 4K-rows value; update_dimm_Tref() may later switch it to 8K rows.
 */
1752 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1755 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1756 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1757 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1758 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Choose the refresh interval (Tref) for DIMM i based on SPD byte 3
 * (number of row address bits): presumably 12 row bits -> 4K-row refresh,
 * otherwise 8K -- TODO confirm against the hidden branch at 1768-1772.
 * Once any DIMM forces the 8K setting it is kept for all later DIMMs.
 * Returns -1 on SPD read error.
 */
1761 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1765 unsigned tref, old_tref;
1766 value = spd_read_byte(ctrl->channel0[i], 3);
1767 if (value < 0) return -1;
1770 tref = param->dch_tref8k;
1772 tref = param->dch_tref4k;
/* Only keep the 4K setting if no earlier DIMM required 8K. */
1775 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1776 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
1777 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1778 tref = param->dch_tref4k;
1780 tref = param->dch_tref8k;
1782 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1783 dth |= (tref << DTH_TREF_SHIFT);
1784 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Flag DIMM i as built from x4 SDRAM devices in DRAM Config Low when SPD
 * byte 13 (primary SDRAM device width) indicates so; with quad-rank
 * support the bit for the upper chip-select pair is set as well.
 */
1789 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1793 #if QRANK_DIMM_SUPPORT == 1
/* SPD byte 13: primary SDRAM device width (e.g. 4 for x4 parts). */
1797 value = spd_read_byte(ctrl->channel0[i], 13);
1802 #if QRANK_DIMM_SUPPORT == 1
1803 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
1809 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1810 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank: also mark the second chip-select pair (i+2) as x4. */
1812 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1815 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1820 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Disable ECC in DRAM Config Low if DIMM i does not support it.
 * SPD byte 11 is the module configuration type (0 = none, 1 = parity,
 * 2 = ECC); the comparison itself is outside this view.
 */
1824 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1828 value = spd_read_byte(ctrl->channel0[i], 11);
1833 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1834 dcl &= ~DCL_DimmEccEn;
1835 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count populated DIMMs by scanning every other chip-select base
 * register (one register pair per DIMM) for the enable bit.
 */
1840 static int count_dimms(const struct mem_controller *ctrl)
1845 for (index = 0; index < 8; index += 2) {
1847 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
/* Program Twtr (write-to-read delay) into DRAM Timing High from the
 * precomputed param table; not read from SPD.
 */
1855 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1859 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1860 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1861 dth |= ((param->dtl_twtr - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1862 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program Trwt (read-to-write turnaround) into DRAM Timing High.  The
 * value is looked up in param->dtl_trwt by the currently programmed CAS
 * latency and the memory interface type.
 */
1865 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
/* Current CAS latency as programmed by spd_set_memclk(). */
1873 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1874 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1876 if (is_opteron(ctrl)) {
1877 mtype = 0; /* dual channel */
1878 } else if (is_registered(ctrl)) {
1879 mtype = 1; /* registered 64bit interface */
1881 mtype = 2; /* unbuffered 64bit interface */
/* Unexpected TCL encoding: cannot look up a Trwt value. */
1895 die("Unknown LAT for Trwt");
1898 clocks = param->dtl_trwt[lat][mtype];
1899 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1900 die("Unknown Trwt\n");
1903 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1904 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1905 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1906 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program Twcl (write CAS latency, in MEMCLKs after CAS#) into DRAM
 * Timing High; the value depends only on whether the DIMMs are
 * registered (assignments not visible in this view).
 */
1910 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1912 /* Memory Clocks after CAS# */
1915 if (is_registered(ctrl)) {
1920 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1921 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1922 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1923 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the read preamble in DRAM Config High.  The value comes from
 * param->rdpreamble, indexed by whether the DIMMs are registered and by
 * how many sockets are wired on this controller (electrical loading).
 */
1927 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1930 unsigned rdpreamble;
/* Count wired sockets (not populated DIMMs) on channel 0. */
1935 for (i = 0; i < 4; i++) {
1936 if (ctrl->channel0[i]) {
1941 /* map to index to param.rdpreamble array */
1942 if (is_registered(ctrl)) {
1944 } else if (slots < 3) {
1946 } else if (slots == 3) {
1948 } else if (slots == 4) {
1951 die("Unknown rdpreamble for this nr of slots");
1954 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1955 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
1956 rdpreamble = param->rdpreamble[i];
/* Sanity-check the table value before programming it. */
1958 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
1959 die("Unknown rdpreamble");
1962 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
1963 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the maximum asynchronous latency in DRAM Config High.  The
 * value depends on registered vs. unbuffered DIMMs and on the populated
 * DIMM count (specific assignments not visible in this view).
 */
1966 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
1972 dimms = count_dimms(ctrl);
1974 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1975 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
1977 if (is_registered(ctrl)) {
/* More than the supported number of unbuffered DIMMs is fatal. */
1989 die("Too many unbuffered dimms");
1991 else if (dimms == 3) {
2000 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2001 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the dynamic idle cycle counter: limit of 16 MEMCLKs with the
 * dynamic counter enabled, per AMD's recommended hardcoded setting.
 */
2004 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2007 /* AMD says to Hardcode this */
2008 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2009 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2010 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2011 dch |= DCH_DYN_IDLE_CTR_EN;
2012 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Apply all per-DIMM and global timing parameters.  For each enabled
 * DIMM, every update_dimm_* step must succeed (rc > 0); a failing DIMM
 * is disabled and removed from the mask (rc < 0 presumably escalates to
 * a hard error on the hidden dimm_err path -- TODO confirm).  Returns
 * the possibly-reduced dimm_mask.
 */
2015 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2019 init_Tref(ctrl, param);
2020 for (i = 0; i < DIMM_SOCKETS; i++) {
2022 if (!(dimm_mask & (1 << i))) {
2025 /* DRAM Timing Low Register */
2026 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2027 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2028 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2029 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2030 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2031 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2033 /* DRAM Timing High Register */
2034 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2037 /* DRAM Config Low */
2038 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2039 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
/* dimm_err path: drop the offending DIMM and continue. */
2045 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2047 /* DRAM Timing Low Register */
2048 set_Twr(ctrl, param);
2050 /* DRAM Timing High Register */
2051 set_Twtr(ctrl, param);
2052 set_Trwt(ctrl, param);
2053 set_Twcl(ctrl, param);
2055 /* DRAM Config High */
2056 set_read_preamble(ctrl, param);
2057 set_max_async_latency(ctrl, param);
2058 set_idle_cycle_limit(ctrl, param);
/* Top-level SPD-driven configuration of one memory controller: detect
 * DIMMs, enable dual-channel when possible, size the DIMMs, handle
 * unbuffered modules, select MEMCLK/CAS, then program all timings.
 * An unrecoverable SPD failure falls through to the hw_spd_err path.
 */
2062 #if RAMINIT_SYSINFO==1
2063 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
2065 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2068 struct spd_set_memclk_result result;
2069 const struct mem_param *param;
2072 if (!controller_present(ctrl)) {
2073 // printk(BIOS_DEBUG, "No memory controller present\n")
2077 hw_enable_ecc(ctrl);
2078 activate_spd_rom(ctrl);
2079 dimm_mask = spd_detect_dimms(ctrl);
/* Nothing in channel 0 means no memory on this node at all. */
2080 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2081 printk(BIOS_DEBUG, "No memory for this cpu\n");
2084 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2087 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2090 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
2093 result = spd_set_memclk(ctrl, dimm_mask);
2094 param = result.param;
2095 dimm_mask = result.dimm_mask;
2098 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2104 /* Unrecoverable error reading SPD data */
2105 printk(BIOS_ERR, "SPD error - reset\n");
/* Hoist the memory that would be shadowed by the PCI hole above 4GB.
 * Shifts the DRAM base/limit ranges of all nodes above node i up by the
 * carry-over amount, extends node i's limit, and programs the DRAM Hole
 * Address Register (f1 0xf0).  hole_startk is the hole start in KiB;
 * carry_over is the KiB between hole start and 4GB.  Returns a value
 * whose definition is outside this view.
 */
2110 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2114 uint32_t carry_over;
2116 uint32_t base, limit;
/* KiB of DRAM displaced by the hole (hole start .. 4GB). */
2121 carry_over = (4*1024*1024) - hole_startk;
/* Shift every enabled node above node i upward by carry_over.
 * Base/limit registers hold address bits 39:24, i.e. units of 16MB;
 * (carry_over << 2) converts KiB into that register format. */
2123 for (ii=controllers - 1;ii>i;ii--) {
2124 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2125 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2128 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
/* Mirror the updated map into every node's f1 register set. */
2129 for (j = 0; j < controllers; j++) {
2130 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2131 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
2134 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2135 for (j = 0; j < controllers; j++) {
2136 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2139 base = pci_read_config32(dev, 0x40 + (i << 3));
2140 basek = (base & 0xffff0000) >> 2;
2141 if (basek == hole_startk) {
/* Hole starts exactly at this node's base: a hole offset would be 0
 * (overflow), so raise the base register instead; the new base becomes
 * 4GB (4*1024*1024 KiB). */
2145 base |= (4*1024*1024)<<2;
2146 for (j = 0; j < controllers; j++) {
2147 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
/* Compose the DRAM Hole Address Register value. */
2151 hoist = /* hole start address */
2152 ((hole_startk << 10) & 0xff000000) +
2153 /* hole address to memory controller address */
2154 (((basek + carry_over) >> 6) & 0x0000ff00) +
2157 pci_write_config32(dev, 0xf0, hoist);
/* Configure the hardware memory hole below 4GB.  Computes the hole
 * start (4GB minus the configured hole size, in KiB), optionally nudges
 * it off a node boundary, finds the node whose DRAM range contains it,
 * hoists that node's memory and reprograms TOP_MEM/TOP_MEM2.
 */
2163 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2166 uint32_t hole_startk;
2169 hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK;
2171 printk(BIOS_SPEW, "Handling memory hole at 0x%08x (default)\n", hole_startk);
2172 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
2173 /* We need to double check if hole_startk is valid.
2174 * If it is equal to the dram base address in K (base_k),
2175 * we need to decrease it.
2178 for (i=0; i<controllers; i++) {
/* Node maps are mirrored; reading node 0's copy is sufficient. */
2181 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
/* Skip ranges without both the read and write enable bits set. */
2182 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2185 base_k = (base & 0xffff0000) >> 2;
2186 if (base_k == hole_startk) {
2187 /* decrease memory hole startk to make sure it is
2188 * in the middle of the previous node
2190 hole_startk -= (base_k - basek_pri)>>1;
2191 break; /* only one hole */
2196 printk(BIOS_SPEW, "Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
2198 /* Find node number that needs the memory hole configured */
2199 for (i=0; i<controllers; i++) {
2200 uint32_t base, limit;
2201 unsigned base_k, limit_k;
2202 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2203 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2206 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2207 base_k = (base & 0xffff0000) >> 2;
2208 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2209 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2211 hoist_memory(controllers, ctrl, hole_startk, i);
2212 end_k = memory_end_k(ctrl, controllers);
2213 set_top_mem(end_k, hole_startk);
2214 break; /* only one hole */
/* Upper bound on DRAM-init polling iterations before declaring failure. */
2222 #define TIMEOUT_LOOPS 300000
2223 #if RAMINIT_SYSINFO == 1
/* Bring up DRAM on all controllers: validate memory clocks, pulse the
 * DIMM reset, start hardware DRAM initialization, poll it to completion
 * (with timeout), wait for the post-C0 memory clear, and finally set up
 * the hardware memory hole on E0 and later parts.
 */
2224 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
2226 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2231 /* Error if I don't have memory */
2232 if (memory_end_k(ctrl, controllers) == 0) {
2236 /* Before enabling memory start the memory clocks */
2237 for (i = 0; i < controllers; i++) {
2239 if (!controller_present(ctrl + i))
2241 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2242 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2243 dch |= DCH_MEMCLK_VALID;
2244 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2247 /* Disable dram receivers */
2249 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2250 dcl |= DCL_DisInRcvrs;
2251 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2255 /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
2256 /* And if necessary toggle the reset on the dimms by hand */
2257 memreset(controllers, ctrl);
2259 for (i = 0; i < controllers; i++) {
2261 if (!controller_present(ctrl + i))
2263 /* Skip everything if I don't have any memory on this controller */
2264 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2265 if (!(dch & DCH_MEMCLK_VALID)) {
2269 /* Toggle DisDqsHys to get it working */
2270 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2271 if (dcl & DCL_DimmEccEn) {
2273 printk(BIOS_SPEW, "ECC enabled\n");
2274 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
/* 128-bit (dual channel) + ECC enables ChipKill. */
2276 if (dcl & DCL_128BitEn) {
2277 mnc |= MNC_CHIPKILL_EN;
2279 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2281 dcl |= DCL_DisDqsHys;
2282 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2283 dcl &= ~DCL_DisDqsHys;
2284 dcl &= ~DCL_DLL_Disable;
/* Kick off hardware DRAM initialization. */
2287 dcl |= DCL_DramInit;
2288 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
/* Second pass: wait for each controller to finish initializing. */
2291 for (i = 0; i < controllers; i++) {
2293 if (!controller_present(ctrl + i))
2295 /* Skip everything if I don't have any memory on this controller */
2296 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2297 if (!(dch & DCH_MEMCLK_VALID)) {
2301 printk(BIOS_DEBUG, "Initializing memory: ");
/* Poll until hardware clears DramInit or we hit TIMEOUT_LOOPS. */
2304 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2306 if ((loops & 1023) == 0) {
2307 printk(BIOS_DEBUG, ".");
2309 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2310 if (loops >= TIMEOUT_LOOPS) {
2311 printk(BIOS_DEBUG, " failed\n");
2315 if (!is_cpu_pre_c0()) {
2316 /* Wait until it is safe to touch memory */
2317 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2318 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2320 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2321 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2324 printk(BIOS_DEBUG, " done\n");
2327 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
2328 // init hw mem hole here
2329 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2330 if (!is_cpu_pre_e0())
2331 set_hw_mem_hole(controllers, ctrl);
2334 //FIXME add enable node interleaving here -- yhlu
/* Sketch of the missing node-interleave enable sequence: */
2336 1. check how many nodes we have , if not all has ram installed get out
2337 2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
2338 3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
2339 4. if all ready enable node_interleaving in f1 0x40..... of every node
2340 5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
/* Record the given flag value in the sys_info area in RAM
 * (body outside this view). */
2345 static void set_sysinfo_in_ram(unsigned val)
/* Populate one mem_controller descriptor per node: PCI devices 0:18+i
 * functions 0-3, plus per-channel SPD addresses taken from spd_addr
 * (laid out as controllers x 2 channels x DIMM_SOCKETS entries).
 * spd_addr may be NULL, in which case only the PCI devices are filled.
 */
2349 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
2350 const uint16_t *spd_addr)
2354 struct mem_controller *ctrl;
2355 for (i=0;i<controllers; i++) {
2358 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2359 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2360 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2361 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2363 if (spd_addr == (void *)0) continue;
2365 for (j=0;j<DIMM_SOCKETS;j++) {
2366 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2367 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];