1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
6 #if K8_REV_F_SUPPORT == 1
10 #include <cpu/x86/mem.h>
11 #include <cpu/x86/cache.h>
12 #include <cpu/x86/mtrr.h>
16 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
17 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
20 #ifndef QRANK_DIMM_SUPPORT
21 #define QRANK_DIMM_SUPPORT 0
24 #if defined (__GNUC__)
25 static void hard_reset(void);
29 static void setup_resource_map(const unsigned int *register_values, int max)
32 // print_debug("setting up resource map....");
36 for(i = 0; i < max; i += 3) {
41 #if CONFIG_USE_PRINTK_IN_CAR
42 prink_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
44 print_debug_hex32(register_values[i]);
46 print_debug_hex32(register_values[i+2]);
50 dev = register_values[i] & ~0xfff;
51 where = register_values[i] & 0xfff;
52 reg = pci_read_config32(dev, where);
53 reg &= register_values[i+1];
54 reg |= register_values[i+2];
55 pci_write_config32(dev, where, reg);
57 reg = pci_read_config32(register_values[i]);
58 reg &= register_values[i+1];
59 reg |= register_values[i+2] & ~register_values[i+1];
60 pci_write_config32(register_values[i], reg);
63 // print_debug("done.\r\n");
67 static int controller_present(const struct mem_controller *ctrl)
/* Nonzero when function 0 of this node reads back the AMD K8 HT
 * configuration vendor/device ID (0x1022/0x1100), i.e. a memory
 * controller actually exists at ctrl->f0. */
69 	return pci_read_config32(ctrl->f0, 0) == 0x11001022;
72 #if RAMINIT_SYSINFO==1
/* Program this node's northbridge DRAM address-map, chip-select, timing
 * and config registers with safe power-on defaults.  register_values is a
 * table of (PCI address, AND mask, OR value) triples; addresses are given
 * for node 0 and rebased onto this controller's devices in the loop below. */
73 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
75 static void sdram_set_registers(const struct mem_controller *ctrl)
78 	static const unsigned int register_values[] = {
80 	/* Careful set limit registers before base registers which contain the enables */
81 	/* DRAM Limit i Registers
90 	 * [ 2: 0] Destination Node ID
100 	 * [10: 8] Interleave select
101 	 *         specifies the values of A[14:12] to use with interleave enable.
103 	 * [31:16] DRAM Limit Address i Bits 39-24
104 	 *         This field defines the upper address bits of a 40 bit address
105 	 *         that define the end of the DRAM region.
107 	PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
108 	PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
109 	PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
110 	PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
111 	PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
112 	PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
113 	PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
114 	PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
115 	/* DRAM Base i Registers
124 	 * [ 0: 0] Read Enable
127 	 * [ 1: 1] Write Enable
128 	 *         0 = Writes Disabled
131 	 * [10: 8] Interleave Enable
132 	 *         000 = No interleave
133 	 *         001 = Interleave on A[12] (2 nodes)
135 	 *         011 = Interleave on A[12] and A[14] (4 nodes)
139 	 *         111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
141 	 * [31:16] DRAM Base Address i Bits 39-24
142 	 *         This field defines the upper address bits of a 40-bit address
143 	 *         that define the start of the DRAM region.
145 	PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
146 	PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
147 	PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
148 	PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
149 	PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
150 	PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
151 	PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
152 	PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
154 	/* DRAM CS Base Address i Registers
163 	 * [ 0: 0] Chip-Select Bank Enable
167 	 * [15: 9] Base Address (19-13)
168 	 *         An optimization used when all DIMM are the same size...
170 	 * [31:21] Base Address (35-25)
171 	 *         This field defines the top 11 addresses bit of a 40-bit
172 	 *         address that define the memory address space.  These
173 	 *         bits decode 32-MByte blocks of memory.
175 	PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
176 	PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
177 	PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
178 	PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
179 	PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
180 	PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
181 	PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
182 	PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
183 	/* DRAM CS Mask Address i Registers
192 	 * Select bits to exclude from comparison with the DRAM Base address register.
194 	 * [15: 9] Address Mask (19-13)
195 	 *         Address to be excluded from the optimized case
197 	 * [29:21] Address Mask (33-25)
198 	 *         The bits with an address mask of 1 are excluded from address comparison
202 	PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
203 	PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
204 	PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
205 	PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
206 	PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
207 	PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
208 	PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
209 	PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
210 	/* DRAM Bank Address Mapping Register
212 	 * Specify the memory module size
217 	 *         000 = 32Mbyte  (Rows = 12 & Col =  8)
218 	 *         001 = 64Mbyte  (Rows = 12 & Col =  9)
219 	 *         010 = 128Mbyte (Rows = 13 & Col =  9)|(Rows = 12 & Col = 10)
220 	 *         011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
221 	 *         100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
222 	 *         101 = 1Gbyte   (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
223 	 *         110 = 2Gbyte   (Rows = 14 & Col = 12)
230 	PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
231 	/* DRAM Timing Low Register
233 	 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
243 	 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
244 	 *         0000 =  7 bus clocks
245 	 *         0001 =  8 bus clocks
247 	 *         1110 = 21 bus clocks
248 	 *         1111 = 22 bus clocks
249 	 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
250 	 *         0000 =  9 bus clocks
251 	 *         0010 = 10 bus clocks
253 	 *         1110 = 23 bus clocks
254 	 *         1111 = 24 bus clocks
255 	 * [14:12] Trcd (Ras#-active to Case#-read/write Delay)
265 	 * [18:16] Trrd (Ras# to Ras# Delay)
275 	 * [23:20] Tras (Minmum Ras# Active Time)
276 	 *         0000 to 0100 = reserved
277 	 *         0101 = 5 bus clocks
279 	 *         1111 = 15 bus clocks
280 	 * [26:24] Trp (Row Precharge Time)
290 	 * [28:28] Twr (Write Recovery Time)
295 	PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
296 	/* DRAM Timing High Register
298 	 * [ 0: 0] Twtr (Write to Read Delay)
302 	 * [ 6: 4] Trwt (Read to Write Delay)
312 	 * [12: 8] Tref (Refresh Rate)
313 	 *         00000 = 100Mhz 4K rows
314 	 *         00001 = 133Mhz 4K rows
315 	 *         00010 = 166Mhz 4K rows
316 	 *         00011 = 200Mhz 4K rows
317 	 *         01000 = 100Mhz 8K/16K rows
318 	 *         01001 = 133Mhz 8K/16K rows
319 	 *         01010 = 166Mhz 8K/16K rows
320 	 *         01011 = 200Mhz 8K/16K rows
322 	 * [22:20] Twcl (Write CAS Latency)
323 	 *         000 = 1 Mem clock after CAS# (Unbuffered Dimms)
324 	 *         001 = 2 Mem clocks after CAS# (Registered Dimms)
327 	PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
328 	/* DRAM Config Low Register
330 	 * [ 0: 0] DLL Disable
339 	 * [ 3: 3] Disable DQS Hystersis  (FIXME handle this one carefully)
340 	 *         0 = Enable DQS input filter
341 	 *         1 = Disable DQS input filtering
344 	 *         0 = Initialization done or not yet started.
345 	 *         1 = Initiate DRAM intialization sequence
346 	 * [ 9: 9] SO-Dimm Enable
348 	 *         1 = SO-Dimms present
350 	 *         0 = DRAM not enabled
351 	 *         1 = DRAM initialized and enabled
352 	 * [11:11] Memory Clear Status
353 	 *         0 = Memory Clear function has not completed
354 	 *         1 = Memory Clear function has completed
355 	 * [12:12] Exit Self-Refresh
356 	 *         0 = Exit from self-refresh done or not yet started
357 	 *         1 = DRAM exiting from self refresh
358 	 * [13:13] Self-Refresh Status
359 	 *         0 = Normal Operation
360 	 *         1 = Self-refresh mode active
361 	 * [15:14] Read/Write Queue Bypass Count
366 	 * [16:16] 128-bit/64-Bit
367 	 *         0 = 64bit Interface to DRAM
368 	 *         1 = 128bit Interface to DRAM
369 	 * [17:17] DIMM ECC Enable
370 	 *         0 = Some DIMMs do not have ECC
371 	 *         1 = ALL DIMMS have ECC bits
372 	 * [18:18] UnBuffered DIMMs
374 	 *         1 = Unbuffered DIMMS
375 	 * [19:19] Enable 32-Byte Granularity
376 	 *         0 = Optimize for 64byte bursts
377 	 *         1 = Optimize for 32byte bursts
378 	 * [20:20] DIMM 0 is x4
379 	 * [21:21] DIMM 1 is x4
380 	 * [22:22] DIMM 2 is x4
381 	 * [23:23] DIMM 3 is x4
383 	 *         1 = x4 DIMM present
384 	 * [24:24] Disable DRAM Receivers
385 	 *         0 = Receivers enabled
386 	 *         1 = Receivers disabled
388 	 *         000 = Arbiter's choice is always respected
389 	 *         001 = Oldest entry in DCQ can be bypassed 1 time
390 	 *         010 = Oldest entry in DCQ can be bypassed 2 times
391 	 *         011 = Oldest entry in DCQ can be bypassed 3 times
392 	 *         100 = Oldest entry in DCQ can be bypassed 4 times
393 	 *         101 = Oldest entry in DCQ can be bypassed 5 times
394 	 *         110 = Oldest entry in DCQ can be bypassed 6 times
395 	 *         111 = Oldest entry in DCQ can be bypassed 7 times
398 	PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
400 	(0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
401 	(1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
402 	(2 << 14)|(0 << 13)|(0 << 12)|
403 	(0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
404 	(0 << 3) |(0 << 1) |(0 << 0),
405 	/* DRAM Config High Register
407 	 * [ 0: 3] Maximum Asynchronous Latency
412 	 * [11: 8] Read Preamble
430 	 * [18:16] Idle Cycle Limit
439 	 * [19:19] Dynamic Idle Cycle Center Enable
440 	 *         0 = Use Idle Cycle Limit
441 	 *         1 = Generate a dynamic Idle cycle limit
442 	 * [22:20] DRAM MEMCLK Frequency
452 	 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
453 	 *         0 = Disable MemClks
455 	 * [26:26] Memory Clock 0 Enable
458 	 * [27:27] Memory Clock 1 Enable
461 	 * [28:28] Memory Clock 2 Enable
464 	 * [29:29] Memory Clock 3 Enable
469 	PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
470 	(0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
471 	(0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
472 	/* DRAM Delay Line Register
474 	 * Adjust the skew of the input DQS strobe relative to DATA
476 	 * [23:16] Delay Line Adjust
477 	 *         Adjusts the DLL derived PDL delay by one or more delay stages
478 	 *         in either the faster or slower direction.
479 	 * [24:24} Adjust Slower
481 	 *         1 = Adj is used to increase the PDL delay
482 	 * [25:25] Adjust Faster
484 	 *         1 = Adj is used to decrease the PDL delay
487 	PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
488 	/* DRAM Scrub Control Register
490 	 * [ 4: 0] DRAM Scrube Rate
492 	 * [12: 8] L2 Scrub Rate
494 	 * [20:16] Dcache Scrub
497 	 *         00000 = Do not scrub
519 	 *         All Others = Reserved
521 	PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
522 	/* DRAM Scrub Address Low Register
524 	 * [ 0: 0] DRAM Scrubber Redirect Enable
526 	 *         1 = Scrubber Corrects errors found in normal operation
528 	 * [31: 6] DRAM Scrub Address 31-6
530 	PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
531 	/* DRAM Scrub Address High Register
533 	 * [ 7: 0] DRAM Scrubb Address 39-32
536 	PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
/* Skip nodes whose memory controller did not respond with the K8 ID. */
542 	if (!controller_present(ctrl)) {
543 		// print_debug("No memory controller present\r\n");
547 	print_spew("setting up CPU");
548 	print_spew_hex8(ctrl->node_id);
549 	print_spew(" northbridge registers\r\n");
550 	max = sizeof(register_values)/sizeof(register_values[0]);
551 	for(i = 0; i < max; i += 3) {
556 #if CONFIG_USE_PRINTK_IN_CAR
/* NOTE(review): "prink_debug" below looks like a typo for printk_debug —
 * confirm against the console API before relying on CAR debug output. */
557 		prink_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
559 		print_spew_hex32(register_values[i]);
561 		print_spew_hex32(register_values[i+2]);
/* Rebase the table's node-0 PCI address onto this controller's function 0. */
565 		dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
566 		where = register_values[i] & 0xfff;
567 		reg = pci_read_config32(dev, where);
568 		reg &= register_values[i+1];
569 		reg |= register_values[i+2];
570 		pci_write_config32(dev, where, reg);
573 		reg = pci_read_config32(register_values[i]);
574 		reg &= register_values[i+1];
575 		reg |= register_values[i+2];
576 		pci_write_config32(register_values[i], reg);
579 	print_spew("done.\r\n");
583 static void hw_enable_ecc(const struct mem_controller *ctrl)
/* Turn on DIMM ECC when the northbridge advertises the capability
 * (NBCAP_ECC) and the CMOS "ECC_memory" option (default 1) allows it. */
586 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
587 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
588 	dcl &= ~DCL_DimmEccEn;
589 	if (nbcap & NBCAP_ECC) {
590 		dcl |= DCL_DimmEccEn;
/* CMOS option can veto ECC even when the hardware supports it. */
592 	if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
593 		dcl &= ~DCL_DimmEccEn;
595 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
599 static int is_dual_channel(const struct mem_controller *ctrl)
/* Nonzero when DRAM Config Low reports a 128-bit (dual channel) interface. */
602 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
603 	return dcl & DCL_128BitEn;
606 static int is_opteron(const struct mem_controller *ctrl)
608 	/* Test to see if I am an Opteron.
609 	 * FIXME Testing dual channel capability is correct for now
610 	 * but a better test is probably required.
612 #warning "FIXME implement a better test for opterons"
/* Heuristic: only Opteron-class parts report 128-bit capability in NBCAP. */
614 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
615 	return !!(nbcap & NBCAP_128Bit);
618 static int is_registered(const struct mem_controller *ctrl)
620 	/* Test to see if we are dealing with registered SDRAM.
621 	 * If we are not registered we are unbuffered.
622 	 * This function must be called after spd_handle_unbuffered_dimms.
/* DCL_UnBufDimm clear => registered DIMMs are installed. */
625 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
626 	return !(dcl & DCL_UnBufDimm);
634 #if QRANK_DIMM_SUPPORT == 1
/* Decode a DIMM's SPD into log2 sizes (in bits) for each side.
 * Reads SPD bytes 3 (rows), 4 (columns), 17 (logical banks), 6/7 (module
 * data width) and 5 (physical banks).  side2 starts as a copy of side1 and
 * is corrected for asymmetric modules via the high nibbles of bytes 3/4. */
639 static struct dimm_size spd_get_dimm_size(unsigned device)
641 	/* Calculate the log base 2 size of a DIMM in bits */
648 #if QRANK_DIMM_SUPPORT == 1
652 	/* Note it might be easier to use byte 31 here, it has the DIMM size as
653 	 * a multiple of 4MB.  The way we do it now we can size both
654 	 * sides of an asymmetric dimm.
656 	value = spd_read_byte(device, 3);	/* rows */
657 	if (value < 0) goto hw_err;
658 	if ((value & 0xf) == 0) goto val_err;
659 	sz.side1 += value & 0xf;
660 	sz.rows = value & 0xf;
662 	value = spd_read_byte(device, 4);	/* columns */
663 	if (value < 0) goto hw_err;
664 	if ((value & 0xf) == 0) goto val_err;
665 	sz.side1 += value & 0xf;
666 	sz.col = value & 0xf;
668 	value = spd_read_byte(device, 17);	/* banks */
669 	if (value < 0) goto hw_err;
670 	if ((value & 0xff) == 0) goto val_err;
671 	sz.side1 += log2(value & 0xff);
673 	/* Get the module data width and convert it to a power of two */
674 	value = spd_read_byte(device, 7);	/* (high byte) */
675 	if (value < 0) goto hw_err;
679 	low = spd_read_byte(device, 6);	/* (low byte) */
680 	if (low < 0) goto hw_err;
681 	value = value | (low & 0xff);
/* Only 64-bit (non-ECC) and 72-bit (ECC) module widths are acceptable. */
682 	if ((value != 72) && (value != 64)) goto val_err;
683 	sz.side1 += log2(value);
686 	value = spd_read_byte(device, 5);	/* number of physical banks */
687 	if (value < 0) goto hw_err;
688 	if (value == 1) goto out;
689 	if ((value != 2) && (value != 4 )) {
692 #if QRANK_DIMM_SUPPORT == 1
696 	/* Start with the symmetrical case */
699 	value = spd_read_byte(device, 3);	/* rows */
700 	if (value < 0) goto hw_err;
701 	if ((value & 0xf0) == 0) goto out;	/* If symmetrical we are done */
702 	sz.side2 -= (value & 0x0f);		/* Subtract out rows on side 1 */
703 	sz.side2 += ((value >> 4) & 0x0f);	/* Add in rows on side 2 */
705 	value = spd_read_byte(device, 4);	/* columns */
706 	if (value < 0) goto hw_err;
707 	if ((value & 0xff) == 0) goto val_err;
708 	sz.side2 -= (value & 0x0f); 	/* Subtract out columns on side 1 */
709 	sz.side2 += ((value >> 4) & 0x0f);	/* Add in columns on side 2 */
714 	die("Bad SPD value\r\n");
715 	/* If an hw_error occurs report that I have no memory */
721 #if QRANK_DIMM_SUPPORT == 1
729 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
/* Convert the log2 bit sizes from spd_get_dimm_size into DRAM CS Base
 * register values for both sides of DIMM `index`, then enable the matching
 * memory clock(s).  (25 + 3) is log2(32MB) + log2(8 bits per byte). */
731 	uint32_t base0, base1;
734 	if (sz.side1 != sz.side2) {
738 	/* For each base register.
739 	 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
740 	 * The initialize dimm size is in bits.
741 	 * Set the base enable bit0.
746 	/* Make certain side1 of the dimm is at least 32MB */
747 	if (sz.side1 >= (25 +3)) {
748 		base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
751 	/* Make certain side2 of the dimm is at least 32MB */
752 	if (sz.side2 >= (25 + 3)) {
753 		base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
756 	/* Double the size if we are using dual channel memory */
757 	if (is_dual_channel(ctrl)) {
758 		base0 = (base0 << 1) | (base0 & 1);
759 		base1 = (base1 << 1) | (base1 & 1);
762 	/* Clear the reserved bits */
763 	base0 &= ~0x001ffffe;
764 	base1 &= ~0x001ffffe;
766 	/* Set the appropriate DIMM base address register */
767 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
768 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
769 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank DIMMs occupy a second pair of chip selects (offset +4/+5). */
771 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
772 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
776 	/* Enable the memory clocks for this DIMM */
778 		dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
779 		dch |= DCH_MEMCLK_EN0 << index;
780 #if QRANK_DIMM_SUPPORT == 1
782 			dch |= DCH_MEMCLK_EN0 << (index + 2);
785 		pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
789 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
/* Program the DRAM Bank Address Mapping nibble for DIMM `index`.
 * Pre-D0 CPUs encode the size directly; D0 and later use the
 * (rows, columns) lookup table cs_map_aa. */
791 	static const unsigned cs_map_aa[] = {
792 		/* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
801 	map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
802 	map &= ~(0xf << (index * 4));
803 #if QRANK_DIMM_SUPPORT == 1
805 		map &= ~(0xf << ( (index + 2) * 4));
810 	/* Make certain side1 of the dimm is at least 32MB */
811 	if (sz.side1 >= (25 +3)) {
812 		if(is_cpu_pre_d0()) {
813 			map |= (sz.side1 - (25 + 3)) << (index *4);
814 #if QRANK_DIMM_SUPPORT == 1
816 				map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
/* D0+: index the table by rows/cols relative to the 12-row, 8-col minimum. */
821 			map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
822 #if QRANK_DIMM_SUPPORT == 1
824 				map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
830 	pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
834 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
/* For every populated socket in dimm_mask, size the DIMM from SPD and
 * program its CS base and bank-mapping registers.
 * Returns the (possibly unchanged) dimm_mask, or -1 on an SPD read error. */
838 	for(i = 0; i < DIMM_SOCKETS; i++) {
840 		if (!(dimm_mask & (1 << i))) {
843 		sz = spd_get_dimm_size(ctrl->channel0[i]);
845 			return -1; /* Report SPD error */
847 		set_dimm_size(ctrl, sz, i);
848 		set_dimm_map (ctrl, sz, i);
853 static void route_dram_accesses(const struct mem_controller *ctrl,
854 	unsigned long base_k, unsigned long limit_k)
856 	/* Route the addresses to the controller node */
861 	unsigned limit_reg, base_reg;
864 	node_id = ctrl->node_id;
/* Each node owns an 8-byte base/limit register pair at 0x40 + node*8. */
865 	index = (node_id << 3);
866 	limit = (limit_k << 2);
869 	limit |= ( 0 << 8) | (node_id << 0);
870 	base = (base_k << 2);
/* Bits 1:0 = write enable | read enable for this DRAM range. */
872 	base |= (0 << 8) | (1<<1) | (1<<0);
874 	limit_reg = 0x44 + index;
875 	base_reg = 0x40 + index;
/* Write this node's base/limit pair into function 1 of every node. */
876 	for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
877 		pci_write_config32(device, limit_reg, limit);
878 		pci_write_config32(device, base_reg, base);
882 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
884 	/* Error if I don't have memory */
889 	/* Report the amount of memory. */
890 	print_spew("RAM: 0x");
891 	print_spew_hex32(tom_k);
892 	print_spew(" KB\r\n");
894 	/* Now set top of memory */
/* Above 4GB the top of memory goes into the TOP_MEM2 MSR (tom_k is in KB,
 * so the 4GB boundary is 4*1024*1024 KB). */
896 	if(tom_k > (4*1024*1024)) {
897 		msr.lo = (tom_k & 0x003fffff) << 10;
898 		msr.hi = (tom_k & 0xffc00000) >> 22;
899 		wrmsr(TOP_MEM2, msr);
902 	/* Leave a 64M hole between TOP_MEM and TOP_MEM2
903 	 * so I can see my rom chip and other I/O devices.
905 	if (tom_k >= 0x003f0000) {
906 #if HW_MEM_HOLE_SIZEK != 0
907 		if(hole_startk != 0) {
913 	msr.lo = (tom_k & 0x003fffff) << 10;
914 	msr.hi = (tom_k & 0xffc00000) >> 22;
918 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
/* If all enabled chip selects are the same size (and there are 2, 4 or 8
 * of them), rewrite the CS base/mask registers so accesses interleave
 * across them.  Returns the total memory size in KB, or 0 when the
 * configuration cannot be interleaved. */
921 	static const uint8_t csbase_low_shift[] = {
924 	/* 128MB */ (14 - 4),
925 	/* 256MB */ (15 - 4),
926 	/* 512MB */ (15 - 4),
931 	static const uint8_t csbase_low_d0_shift[] = {
934 	/* 128MB */ (14 - 4),
935 	/* 128MB */ (15 - 4),
936 	/* 256MB */ (15 - 4),
937 	/* 512MB */ (15 - 4),
938 	/* 256MB */ (16 - 4),
939 	/* 512MB */ (16 - 4),
945 	/* cs_base_high is not changed */
948 	int chip_selects, index;
950 	unsigned common_size;
951 	unsigned common_cs_mode;
952 	uint32_t csbase, csmask;
954 	/* See if all of the memory chip selects are the same size
955 	 * and if so count them.
960 	for(index = 0; index < 8; index++) {
965 		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
973 		if (common_size == 0) {
976 		/* The size differed fail */
977 		if (common_size != size) {
981 		value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
982 		cs_mode =( value >> ((index>>1)*4)) & 0xf;
983 		if(cs_mode == 0 ) continue;
984 		if(common_cs_mode == 0) {
985 			common_cs_mode = cs_mode;
987 		/* The cs_mode differed, fail */
988 		if(common_cs_mode != cs_mode) {
993 	/* Chip selects can only be interleaved when there is
994 	 * more than one and there is a power of two of them.
996 	bits = log2(chip_selects);
997 	if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
1001 	/* Find the bits of csbase that we need to interleave on */
1002 	if(is_cpu_pre_d0()){
1003 		csbase_inc = 1 << csbase_low_shift[common_cs_mode];
1004 		if(is_dual_channel(ctrl)) {
1005 			/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
1006 			if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
1007 				// print_debug("8 4GB chip selects cannot be interleaved\r\n");
1014 		csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
1015 		if(is_dual_channel(ctrl)) {
1016 			if( (bits==3) && (common_cs_mode > 8)) {
1017 				// print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
1024 	/* Compute the initial values for csbase and csmask.
1025 	 * In csbase just set the enable bit and the base to zero.
1026 	 * In csmask set the mask bits for the size and page level interleave.
1029 	csmask = (((common_size << bits) - 1) << 21);
1030 	csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
1031 	for(index = 0; index < 8; index++) {
1034 		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1035 		/* Is it enabled? */
1039 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1040 		pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
1041 		csbase += csbase_inc;
1044 	print_spew("Interleaved\r\n");
1046 	/* Return the memory size in K */
1047 	return common_size << (15 + bits);
1050 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
/* Non-interleaved fallback: stack enabled chip selects contiguously from
 * address 0, largest first.  Returns the total memory size in KB.
 * (The identifier "canidate" is a historical misspelling of "candidate".) */
1054 	/* Remember which registers we have used in the high 8 bits of tom */
1057 	/* Find the largest remaining canidate */
1058 	unsigned index, canidate;
1059 	uint32_t csbase, csmask;
1063 	for(index = 0; index < 8; index++) {
1065 		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1067 		/* Is it enabled? */
1072 		/* Is it greater? */
1073 		if (value <= csbase) {
1077 		/* Has it already been selected */
1078 		if (tom & (1 << (index + 24))) {
1081 		/* I have a new canidate */
1085 	/* See if I have found a new canidate */
1090 	/* Remember the dimm size */
1091 	size = csbase >> 21;
1093 	/* Remember I have used this register */
1094 	tom |= (1 << (canidate + 24));
1096 	/* Recompute the cs base register value */
1097 	csbase = (tom << 21) | 1;
1099 	/* Increment the top of memory */
1102 	/* Compute the memory mask */
1103 	csmask = ((size -1) << 21);
1104 	csmask |= 0xfe00;		/* For now don't optimize */
1106 	/* Write the new base register */
1107 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1108 	/* Write the new mask register */
1109 	pci_write_config32(ctrl->f2, DRAM_CSMASK + (canidate << 2), csmask);
1112 	/* Return the memory size in K */
1113 	return (tom & ~0xff000000) << 15;
1116 unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
/* Return (in KB) the end of memory claimed by nodes [0, max_node_id) by
 * scanning their DRAM base/limit register pairs in function 1. */
1120 	/* Find the last memory address used */
1122 	for(node_id = 0; node_id < max_node_id; node_id++) {
1123 		uint32_t limit, base;
1125 		index = node_id << 3;
1126 		base = pci_read_config32(ctrl->f1, 0x40 + index);
1127 		/* Only look at the limit if the base is enabled */
/* (base & 3) == 3 means both read and write enables are set. */
1128 		if ((base & 3) == 3) {
1129 			limit = pci_read_config32(ctrl->f1, 0x44 + index);
1130 			end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
1136 static void order_dimms(const struct mem_controller *ctrl)
/* Lay out this node's chip selects (interleaved when the CMOS option
 * allows and the configuration supports it, stacked otherwise), then
 * route the resulting address range and set top-of-memory. */
1138 	unsigned long tom_k, base_k;
1140 	if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1141 		tom_k = interleave_chip_selects(ctrl);
1143 		print_debug("Interleaving disabled\r\n");
1147 		tom_k = order_chip_selects(ctrl);
1149 	/* Compute the memory base address */
1150 	base_k = memory_end_k(ctrl, ctrl->node_id);
1152 	route_dram_accesses(ctrl, base_k, tom_k);
1153 	set_top_mem(tom_k, 0);
1156 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
/* Disable DIMM `index` by zeroing both of its CS base registers and
 * clearing its bit in dimm_mask; returns the updated mask. */
1158 	print_debug("disabling dimm");
1159 	print_debug_hex8(index);
1160 	print_debug("\r\n");
1161 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1162 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1163 	dimm_mask &= ~(1 << index);
1167 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, long dimm_mask)
/* Classify populated DIMMs as registered or unbuffered from SPD byte 21,
 * reject unsupported mixes, and set DCL_UnBufDimm accordingly.
 * Returns the (unchanged) dimm_mask. */
1175 	for(i = 0; (i < DIMM_SOCKETS); i++) {
1177 		if (!(dimm_mask & (1 << i))) {
1180 		value = spd_read_byte(ctrl->channel0[i], 21);
1184 		/* Registered dimm ? */
1185 		if (value & (1 << 1)) {
1188 		/* Otherwise it must be an unbuffered dimm */
1193 	if (unbuffered && registered) {
1194 		die("Mixed buffered and registered dimms not supported");
1197 	//By yhlu for debug Athlon64 939 can do dual channel, but it use unbuffer DIMM
1198 	if (unbuffered && is_opteron(ctrl)) {
1199 		die("Unbuffered Dimms not supported on Opteron");
1203 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1204 	dcl &= ~DCL_UnBufDimm;
1206 		dcl |= DCL_UnBufDimm;
1208 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1210 	if (is_registered(ctrl)) {
1211 		print_debug("Registered\r\n");
1213 		print_debug("Unbuffered\r\n");
1219 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
/* Probe both channels' SPD devices (byte 2 = memory type) and return a
 * bitmask of populated sockets: channel 0 in the low DIMM_SOCKETS bits,
 * channel 1 in the next DIMM_SOCKETS bits. */
1224 	for(i = 0; i < DIMM_SOCKETS; i++) {
1227 		device = ctrl->channel0[i];
1229 			byte = spd_read_byte(ctrl->channel0[i], 2);  /* Type */
1231 				dimm_mask |= (1 << i);
1234 		device = ctrl->channel1[i];
1236 			byte = spd_read_byte(ctrl->channel1[i], 2);
1238 				dimm_mask |= (1 << (i + DIMM_SOCKETS));
1245 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
/* Enable 128-bit (dual channel) mode when the northbridge supports it and
 * every DIMM pair across the two channels has identical timing-relevant
 * SPD bytes.  Falls back to single channel otherwise.  On success the
 * channel-1 bits are cleared from the returned dimm_mask. */
1249 	/* SPD addresses to verify are identical */
1250 	static const uint8_t addresses[] = {
1251 	2,	/* Type should be DDR SDRAM */
1252 	3,	/* *Row addresses */
1253 	4,	/* *Column addresses */
1254 	5,	/* *Physical Banks */
1255 	6,	/* *Module Data Width low */
1256 	7,	/* *Module Data Width high */
1257 	9,	/* *Cycle time at highest CAS Latency CL=X */
1258 	11,	/* *SDRAM Type */
1259 	13,	/* *SDRAM Width */
1260 	17,	/* *Logical Banks */
1261 	18,	/* *Supported CAS Latencies */
1262 	21,	/* *SDRAM Module Attributes */
1263 	23,	/* *Cycle time at CAS Latency (CLX - 0.5) */
1264 	26,	/* *Cycle time at CAS Latency (CLX - 1.0) */
1265 	27,	/* *tRP Row precharge time */
1266 	28,	/* *Minimum Row Active to Row Active Delay (tRRD) */
1267 	29,	/* *tRCD RAS to CAS */
1268 	30,	/* *tRAS Activate to Precharge */
1269 	41,	/* *Minimum Active to Active/Auto Refresh Time(Trc) */
1270 	42,	/* *Minimum Auto Refresh Command Time(Trfc) */
1272 	/* If the dimms are not in pairs do not do dual channels */
1273 	if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1274 		((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1275 		goto single_channel;
1277 	/* If the cpu is not capable of doing dual channels don't do dual channels */
1278 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1279 	if (!(nbcap & NBCAP_128Bit)) {
1280 		goto single_channel;
1282 	for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1283 		unsigned device0, device1;
1286 		/* If I don't have a dimm skip this one */
1287 		if (!(dimm_mask & (1 << i))) {
1290 		device0 = ctrl->channel0[i];
1291 		device1 = ctrl->channel1[i];
1292 		for(j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
1294 			addr = addresses[j];
1295 			value0 = spd_read_byte(device0, addr);
1299 			value1 = spd_read_byte(device1, addr);
1303 			if (value0 != value1) {
1304 				goto single_channel;
1308 	print_spew("Enabling dual channel memory\r\n");
1310 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1311 	dcl &= ~DCL_32ByteEn;
1312 	dcl |= DCL_128BitEn;
1313 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Channel-1 sockets are now mirrored by channel 0; drop their mask bits. */
1316 	dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
1322 	uint8_t divisor; /* In 1/2 ns increments */
1325 	uint32_t dch_memclk;
1326 	uint16_t dch_tref4k, dch_tref8k;
/* Map a minimum SPD cycle time onto the fastest supported speed bin.
 * The speed[] table is ordered slowest to fastest; iteration stops when
 * the next entry would be too fast for min_cycle_time. */
1331 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1333 	static const struct mem_param speed[] = {
1335 		.name	    = "100Mhz\r\n",
1337 		.divisor    = (10 <<1),
1340 		.dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1341 		.dch_tref4k = DTH_TREF_100MHZ_4K,
1342 		.dch_tref8k = DTH_TREF_100MHZ_8K,
1346 		.name	    = "133Mhz\r\n",
1348 		.divisor    = (7<<1)+1,
1351 		.dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1352 		.dch_tref4k = DTH_TREF_133MHZ_4K,
1353 		.dch_tref8k = DTH_TREF_133MHZ_8K,
1357 		.name	    = "166Mhz\r\n",
1362 		.dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1363 		.dch_tref4k = DTH_TREF_166MHZ_4K,
1364 		.dch_tref8k = DTH_TREF_166MHZ_8K,
1368 		.name	    = "200Mhz\r\n",
1373 		.dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1374 		.dch_tref4k = DTH_TREF_200MHZ_4K,
1375 		.dch_tref8k = DTH_TREF_200MHZ_8K,
1382 	const struct mem_param *param;
1383 	for(param = &speed[0]; param->cycle_time ; param++) {
1384 		if (min_cycle_time > (param+1)->cycle_time) {
1388 	if (!param->cycle_time) {
1389 		die("min_cycle_time to low");
1391 	print_spew(param->name);
1392 #ifdef DRAM_MIN_CYCLE_TIME
1393 	print_debug(param->name);
1398 struct spd_set_memclk_result {
1399 const struct mem_param *param;
1402 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1404 /* Compute the minimum cycle time for these dimms */
1405 struct spd_set_memclk_result result;
1406 unsigned min_cycle_time, min_latency, bios_cycle_time;
1410 static const uint8_t latency_indicies[] = { 26, 23, 9 };
1411 static const unsigned char min_cycle_times[] = {
1412 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1413 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1414 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1415 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
1419 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1420 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1421 bios_cycle_time = min_cycle_times[
1422 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1423 if (bios_cycle_time > min_cycle_time) {
1424 min_cycle_time = bios_cycle_time;
1428 /* Compute the least latency with the fastest clock supported
1429 * by both the memory controller and the dimms.
1431 for(i = 0; i < DIMM_SOCKETS; i++) {
1432 int new_cycle_time, new_latency;
1437 if (!(dimm_mask & (1 << i))) {
1441 /* First find the supported CAS latencies
1442 * Byte 18 for DDR SDRAM is interpreted:
1443 * bit 0 == CAS Latency = 1.0
1444 * bit 1 == CAS Latency = 1.5
1445 * bit 2 == CAS Latency = 2.0
1446 * bit 3 == CAS Latency = 2.5
1447 * bit 4 == CAS Latency = 3.0
1448 * bit 5 == CAS Latency = 3.5
1452 new_cycle_time = 0xa0;
1455 latencies = spd_read_byte(ctrl->channel0[i], 18);
1456 if (latencies <= 0) continue;
1458 /* Compute the lowest cas latency supported */
1459 latency = log2(latencies) -2;
1461 /* Loop through and find a fast clock with a low latency */
1462 for(index = 0; index < 3; index++, latency++) {
1464 if ((latency < 2) || (latency > 4) ||
1465 (!(latencies & (1 << latency)))) {
1468 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1473 /* Only increase the latency if we decreas the clock */
1474 if ((value >= min_cycle_time) && (value < new_cycle_time)) {
1475 new_cycle_time = value;
1476 new_latency = latency;
1479 if (new_latency > 4){
1482 /* Does min_latency need to be increased? */
1483 if (new_cycle_time > min_cycle_time) {
1484 min_cycle_time = new_cycle_time;
1486 /* Does min_cycle_time need to be increased? */
1487 if (new_latency > min_latency) {
1488 min_latency = new_latency;
1491 /* Make a second pass through the dimms and disable
1492 * any that cannot support the selected memclk and cas latency.
1495 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1500 if (!(dimm_mask & (1 << i))) {
1503 latencies = spd_read_byte(ctrl->channel0[i], 18);
1504 if (latencies < 0) goto hw_error;
1505 if (latencies == 0) {
1509 /* Compute the lowest cas latency supported */
1510 latency = log2(latencies) -2;
1512 /* Walk through searching for the selected latency */
1513 for(index = 0; index < 3; index++, latency++) {
1514 if (!(latencies & (1 << latency))) {
1517 if (latency == min_latency)
1520 /* If I can't find the latency or my index is bad error */
1521 if ((latency != min_latency) || (index >= 3)) {
1525 /* Read the min_cycle_time for this latency */
1526 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1527 if (value < 0) goto hw_error;
1529 /* All is good if the selected clock speed
1530 * is what I need or slower.
1532 if (value <= min_cycle_time) {
1535 /* Otherwise I have an error, disable the dimm */
1537 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1540 //down speed for full load 4 rank support
1541 #if QRANK_DIMM_SUPPORT
1542 if(dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1544 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1546 if (!(dimm_mask & (1 << i))) {
1549 val = spd_read_byte(ctrl->channel0[i], 5);
1556 if(min_cycle_time <= 0x50 ) {
1557 min_cycle_time = 0x60;
1564 /* Now that I know the minimum cycle time lookup the memory parameters */
1565 result.param = get_mem_param(min_cycle_time);
1567 /* Update DRAM Config High with our selected memory speed */
1568 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1569 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1571 /* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
1572 if(!is_cpu_pre_e0()) {
1573 if(min_cycle_time==0x50) {
1579 value |= result.param->dch_memclk;
1580 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1582 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1583 /* Update DRAM Timing Low with our selected cas latency */
1584 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1585 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1586 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1587 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1589 result.dimm_mask = dimm_mask;
1592 result.param = (const struct mem_param *)0;
1593 result.dimm_mask = -1;
/* Program tRC (activate-to-activate, same bank) for DIMM i into DRAM Timing Low.
 * SPD byte 41 holds tRC in ns (JEDEC DDR SPD layout); it is converted to memclk
 * cycles (rounded up), clamped to the controller's DTL_TRC_MIN..MAX range, and
 * merged so the slowest DIMM on the channel wins.
 * Returns -1 on SPD read failure.
 * NOTE(review): no fallback value is visible after the 0/0xff check here,
 * unlike update_dimm_Trfc which substitutes param->tRFC -- confirm a
 * param->tRC fallback exists in the full source.
 */
1598 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1600 	unsigned clocks, old_clocks;
1603 	value = spd_read_byte(ctrl->channel0[i], 41);
1604 	if (value < 0) return -1;
1605 	if ((value == 0) || (value == 0xff)) {
/* round ns up to whole memclk cycles; divisor presumably encodes the clock
 * period in half-ns units (value<<1 matches that scale) -- TODO confirm */
1608 	clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1609 	if (clocks < DTL_TRC_MIN) {
1610 		clocks = DTL_TRC_MIN;
1612 	if (clocks > DTL_TRC_MAX) {
1616 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1617 	old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
/* never speed up a tRC another DIMM already forced slower */
1618 	if (old_clocks > clocks) {
1619 		clocks = old_clocks;
1621 	dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1622 	dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1623 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRFC (auto-refresh to activate) for DIMM i into DRAM Timing Low.
 * SPD byte 42 holds tRFC in ns; 0/0xff (absent/invalid) falls back to the
 * memclk-speed default in param->tRFC. The value is converted to clocks,
 * clamped to DTL_TRFC_MIN..MAX, and merged so the slowest DIMM wins.
 * Returns -1 on SPD read failure.
 */
1627 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1629 	unsigned clocks, old_clocks;
1632 	value = spd_read_byte(ctrl->channel0[i], 42);
1633 	if (value < 0) return -1;
1634 	if ((value == 0) || (value == 0xff)) {
1635 		value = param->tRFC;
/* round ns up to whole memclk cycles (same scaling as update_dimm_Trc) */
1637 	clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1638 	if (clocks < DTL_TRFC_MIN) {
1639 		clocks = DTL_TRFC_MIN;
1641 	if (clocks > DTL_TRFC_MAX) {
1644 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1645 	old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
/* keep the larger (slower) of new vs. already-programmed value */
1646 	if (old_clocks > clocks) {
1647 		clocks = old_clocks;
1649 	dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1650 	dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1651 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRCD (RAS#-to-CAS# delay) for DIMM i into DRAM Timing Low.
 * SPD byte 29 holds tRCD in quarter-ns units (hence divisor<<1 in the
 * round-up, vs. the ns-unit bytes handled above). Clamped to
 * DTL_TRCD_MIN..MAX; slowest DIMM wins. Returns -1 on SPD read failure.
 */
1656 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1658 	unsigned clocks, old_clocks;
1661 	value = spd_read_byte(ctrl->channel0[i], 29);
1662 	if (value < 0) return -1;
1663 	clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1664 	if (clocks < DTL_TRCD_MIN) {
1665 		clocks = DTL_TRCD_MIN;
1667 	if (clocks > DTL_TRCD_MAX) {
1670 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1671 	old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
/* keep the larger (slower) of new vs. already-programmed value */
1672 	if (old_clocks > clocks) {
1673 		clocks = old_clocks;
1675 	dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1676 	dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1677 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRRD (activate-to-activate, different banks) for DIMM i into DRAM
 * Timing Low. SPD byte 28 holds tRRD in quarter-ns units. Clamped to
 * DTL_TRRD_MIN..MAX; slowest DIMM wins. Returns -1 on SPD read failure.
 */
1681 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1683 	unsigned clocks, old_clocks;
1686 	value = spd_read_byte(ctrl->channel0[i], 28);
1687 	if (value < 0) return -1;
1688 	clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1689 	if (clocks < DTL_TRRD_MIN) {
1690 		clocks = DTL_TRRD_MIN;
1692 	if (clocks > DTL_TRRD_MAX) {
1695 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1696 	old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
/* keep the larger (slower) of new vs. already-programmed value */
1697 	if (old_clocks > clocks) {
1698 		clocks = old_clocks;
1700 	dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1701 	dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1702 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRAS (minimum activate-to-precharge) for DIMM i into DRAM Timing
 * Low. SPD byte 30 holds tRAS in whole ns (hence the value<<1 / divisor
 * scaling, like tRC/tRFC). Clamped to DTL_TRAS_MIN..MAX; slowest DIMM wins.
 * Returns -1 on SPD read failure.
 */
1706 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1708 	unsigned clocks, old_clocks;
1711 	value = spd_read_byte(ctrl->channel0[i], 30);
1712 	if (value < 0) return -1;
1713 	clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1714 	if (clocks < DTL_TRAS_MIN) {
1715 		clocks = DTL_TRAS_MIN;
1717 	if (clocks > DTL_TRAS_MAX) {
1720 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1721 	old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
/* keep the larger (slower) of new vs. already-programmed value */
1722 	if (old_clocks > clocks) {
1723 		clocks = old_clocks;
1725 	dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1726 	dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1727 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRP (row precharge time) for DIMM i into DRAM Timing Low.
 * SPD byte 27 holds tRP in quarter-ns units. Clamped to DTL_TRP_MIN..MAX;
 * slowest DIMM wins. Returns -1 on SPD read failure.
 */
1731 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1733 	unsigned clocks, old_clocks;
1736 	value = spd_read_byte(ctrl->channel0[i], 27);
1737 	if (value < 0) return -1;
1738 	clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1739 	if (clocks < DTL_TRP_MIN) {
1740 		clocks = DTL_TRP_MIN;
1742 	if (clocks > DTL_TRP_MAX) {
1745 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1746 	old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
/* keep the larger (slower) of new vs. already-programmed value */
1747 	if (old_clocks > clocks) {
1748 		clocks = old_clocks;
1750 	dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1751 	dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1752 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program Twr (write recovery time) into DRAM Timing Low. Not read from
 * SPD: the value comes straight from the selected mem_param for the chosen
 * memclk (param->dtl_twr, in clocks).
 */
1756 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1759 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1760 	dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1761 	dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1762 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Initialize the refresh rate (Tref) in DRAM Timing High to the 4K-rows
 * default for this memclk; update_dimm_Tref() may later relax it to the
 * 8K-rows rate per DIMM.
 */
1766 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1769 	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1770 	dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1771 	dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1772 	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Choose the refresh rate (Tref) for DIMM i. SPD byte 3 is the number of
 * row address bits; 12 row bits implies 4K-row refresh, otherwise the
 * 8K-row rate is selected. When mixing DIMMs, an already-programmed 4K
 * rate is kept (more frequent refresh is the conservative choice).
 * Returns -1 on SPD read failure.
 */
1775 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1779 	unsigned tref, old_tref;
1780 	value = spd_read_byte(ctrl->channel0[i], 3);
1781 	if (value < 0) return -1;
/* presumably: 8k rate when more than 12 row address bits -- the selecting
 * condition is not visible here, confirm against full source */
1784 		tref = param->dch_tref8k;
1786 		tref = param->dch_tref4k;
1789 	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1790 	old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
/* if another DIMM already forced the 4k rate, stay at 4k */
1791 	if ((value == 12) && (old_tref == param->dch_tref4k)) {
1792 		tref = param->dch_tref4k;
1794 		tref = param->dch_tref8k;
1796 	dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1797 	dth |= (tref << DTH_TREF_SHIFT);
1798 	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Flag DIMM i as built from x4 devices in DRAM Config Low (needed for
 * ChipKill ECC). SPD byte 13 is the primary SDRAM device width; SPD byte 5
 * is the number of physical ranks. With QRANK_DIMM_SUPPORT a 4-rank DIMM
 * also sets the x4 bit for the "shadow" chip-select pair (+2).
 */
1803 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1807 #if QRANK_DIMM_SUPPORT == 1
1811 	value = spd_read_byte(ctrl->channel0[i], 13);
1816 #if QRANK_DIMM_SUPPORT == 1
1817 	rank = spd_read_byte(ctrl->channel0[i], 5);	/* number of physical banks */
1823 	dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1824 #if QRANK_DIMM_SUPPORT == 1
1826 		dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1829 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1834 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Disable DIMM ECC support in DRAM Config Low if DIMM i does not provide
 * ECC. SPD byte 11 is the module configuration type (2 == ECC); the visible
 * path clears DCL_DimmEccEn, presumably when byte 11 != 2 -- the guarding
 * condition is not visible here.
 */
1838 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1842 	value = spd_read_byte(ctrl->channel0[i], 11);
1847 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1848 	dcl &= ~DCL_DimmEccEn;
1849 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count populated DIMMs on this controller by scanning the even chip-select
 * base registers (DRAM_CSBASE + index*4, index = 0,2,4,6 -- one CS pair per
 * DIMM) and, presumably, testing each base's enable bit (not visible here).
 */
1854 static int count_dimms(const struct mem_controller *ctrl)
1859 	for(index = 0; index < 8; index += 2) {
1861 		csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
/* Program Twtr (write-to-read delay) in DRAM Timing High. AMD specifies a
 * fixed value of 1 clock regardless of DIMM parameters.
 */
1869 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1873 	clocks = 1; /* AMD says hard code this */
1874 	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1875 	dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1876 	dth |= ((clocks - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1877 	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program Trwt (read-to-write turnaround) in DRAM Timing High. The clock
 * count is chosen from a decision table keyed on CPU type (Opteron vs.
 * Athlon64), registered vs. unbuffered DIMMs, the programmed CAS latency,
 * and the memclk divisor (divisor encodes the clock period; (6<<0)+0
 * corresponds to the 166MHz point, larger means a slower clock). The actual
 * assignments to `clocks` are elided from this view. Dies if the resulting
 * value is outside DTH_TRWT_MIN..MAX.
 */
1880 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
1888 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1889 	latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1890 	divisor = param->divisor;
1892 	if (is_opteron(ctrl)) {
1893 		if (latency == DTL_CL_2) {
1894 			if (divisor == ((6 << 0) + 0)) {
1898 			else if (divisor > ((6 << 0)+0)) {
1899 				/* 100Mhz && 133Mhz */
1903 		else if (latency == DTL_CL_2_5) {
1906 		else if (latency == DTL_CL_3) {
1907 			if (divisor == ((6 << 0)+0)) {
1911 			else if (divisor > ((6 << 0)+0)) {
1912 				/* 100Mhz && 133Mhz */
1917 	else /* Athlon64 */ {
1918 		if (is_registered(ctrl)) {
1919 			if (latency == DTL_CL_2) {
1922 			else if (latency == DTL_CL_2_5) {
1925 			else if (latency == DTL_CL_3) {
1929 		else /* Unbuffered */{
1930 			if (latency == DTL_CL_2) {
1933 			else if (latency == DTL_CL_2_5) {
1936 			else if (latency == DTL_CL_3) {
1941 	if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1942 		die("Unknown Trwt\r\n");
1945 	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1946 	dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1947 	dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1948 	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program Twcl (write CAS latency, memory clocks after CAS#) in DRAM Timing
 * High. The clock count depends only on registered vs. unbuffered DIMMs;
 * the assignments are elided from this view.
 */
1952 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1954 	/* Memory Clocks after CAS# */
1957 	if (is_registered(ctrl)) {
1962 	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1963 	dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1964 	dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1965 	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the read preamble in DRAM Config High. Values are selected in
 * half-nanosecond units ((N << 1) + half) from a table keyed on the memclk
 * divisor; registered DIMMs use a single value per speed, while unbuffered
 * channels pick between two values, presumably by the per-channel DIMM
 * count gathered in the loop below (selection condition elided from this
 * view). Dies if the result is outside DCH_RDPREAMBLE_MIN..MAX.
 */
1969 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1973 	unsigned rdpreamble;
1974 	divisor = param->divisor;
1975 	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1976 	dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
1978 	if (is_registered(ctrl)) {
/* divisor encodings: (10<<1)+0 = 100MHz, (7<<1)+1 = 133MHz,
 * (6<<1)+0 = 166MHz, (5<<1)+0 = 200MHz -- TODO confirm */
1979 		if (divisor == ((10 << 1)+0)) {
1981 			rdpreamble = ((9 << 1)+ 0);
1983 		else if (divisor == ((7 << 1)+1)) {
1985 			rdpreamble = ((8 << 1)+0);
1987 		else if (divisor == ((6 << 1)+0)) {
1989 			rdpreamble = ((7 << 1)+1);
1991 		else if (divisor == ((5 << 1)+0)) {
1993 			rdpreamble = ((7 << 1)+0);
/* unbuffered: count populated sockets to pick the lighter/heavier load value */
2000 		for(i = 0; i < 4; i++) {
2001 			if (ctrl->channel0[i]) {
2005 		if (divisor == ((10 << 1)+0)) {
2009 				rdpreamble = ((9 << 1)+0);
2012 				rdpreamble = ((14 << 1)+0);
2015 		else if (divisor == ((7 << 1)+1)) {
2019 				rdpreamble = ((7 << 1)+0);
2022 				rdpreamble = ((11 << 1)+0);
2025 		else if (divisor == ((6 << 1)+0)) {
2029 				rdpreamble = ((7 << 1)+0);
2032 				rdpreamble = ((9 << 1)+0);
2035 		else if (divisor == ((5 << 1)+0)) {
2039 				rdpreamble = ((5 << 1)+0);
2042 				rdpreamble = ((7 << 1)+0);
2046 	if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
2047 		die("Unknown rdpreamble");
2049 	dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
2050 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the maximum asynchronous latency in DRAM Config High. The value
 * (async_lat, assignments elided from this view) depends on registered vs.
 * unbuffered DIMMs and the number of populated DIMMs; more than the
 * supported number of unbuffered DIMMs is fatal.
 */
2053 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2059 	dimms = count_dimms(ctrl);
2061 	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2062 	dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
2064 	if (is_registered(ctrl)) {
2076 			die("Too many unbuffered dimms");
2078 		else if (dimms == 3) {
2087 	dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2088 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the dynamic idle cycle counter in DRAM Config High. AMD specifies
 * hardcoding the limit to 16 clocks and enabling the dynamic idle counter.
 */
2091 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2094 	/* AMD says to Hardcode this */
2095 	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2096 	dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2097 	dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2098 	dch |= DCH_DYN_IDLE_CTR_EN;
2099 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Apply all SPD-derived timings for the selected memclk (param) to this
 * memory controller. Per-DIMM timings (tRC/tRFC/tRCD/tRRD/tRAS/tRP, Tref,
 * x4 width, ECC capability) are merged over every present DIMM; a DIMM
 * whose update fails (rc <= 0) is disabled via disable_dimm() at dimm_err.
 * The remaining timings (Twr, Twtr, Trwt, Twcl, read preamble, async
 * latency, idle limit) depend only on param/controller state.
 * Returns the (possibly reduced) dimm_mask.
 */
2102 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2106 	init_Tref(ctrl, param);
2107 	for(i = 0; i < DIMM_SOCKETS; i++) {
2109 		if (!(dimm_mask & (1 << i))) {
2112 		/* DRAM Timing Low Register */
2113 		if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2114 		if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2115 		if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2116 		if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2117 		if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2118 		if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2120 		/* DRAM Timing High Register */
2121 		if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2124 		/* DRAM Config Low */
2125 		if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2126 		if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
/* dimm_err: drop the offending DIMM and keep going with the rest */
2132 		dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2134 	/* DRAM Timing Low Register */
2135 	set_Twr(ctrl, param);
2137 	/* DRAM Timing High Register */
2138 	set_Twtr(ctrl, param);
2139 	set_Trwt(ctrl, param);
2140 	set_Twcl(ctrl, param);
2142 	/* DRAM Config High */
2143 	set_read_preamble(ctrl, param);
2144 	set_max_async_latency(ctrl, param);
2145 	set_idle_cycle_limit(ctrl, param);
/* Top-level SPD flow for one memory controller: detect DIMMs, reduce the
 * mask through each constraint stage (dual-channel pairing, chip-select
 * sizing, unbuffered handling, memclk selection), then program all timings.
 * An unrecoverable SPD error falls through to a reset path at the bottom.
 * The RAMINIT_SYSINFO variant threads a sys_info pointer through for the
 * CAR (cache-as-RAM) build.
 */
2149 #if RAMINIT_SYSINFO==1
2150 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
2152 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2155 	struct spd_set_memclk_result result;
2156 	const struct mem_param *param;
2159 	if (!controller_present(ctrl)) {
2160 //		print_debug("No memory controller present\r\n");
2164 	hw_enable_ecc(ctrl);
2165 	activate_spd_rom(ctrl);
2166 	dimm_mask = spd_detect_dimms(ctrl);
2167 	if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2168 		print_debug("No memory for this cpu\r\n");
2171 	dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2174 	dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2177 	dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
/* pick the fastest memclk every remaining DIMM supports */
2180 	result = spd_set_memclk(ctrl, dimm_mask);
2181 	param = result.param;
2182 	dimm_mask = result.dimm_mask;
2185 	dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2191 	/* Unrecoverable error reading SPD data */
2192 	print_err("SPD error - reset\r\n");
2197 #if HW_MEM_HOLE_SIZEK != 0
/* Hoist the memory that would collide with the PCI hole on node i above
 * 4GB. carry_over is the amount of memory (in KB, stored >>2 as register
 * units) displaced between hole_startk and 4GB; every higher node's DRAM
 * base/limit pair is shifted up by it on ALL nodes' F1 route registers,
 * then node i's own limit. If node i starts exactly at the hole, its base
 * register is moved to 4GB instead of programming a hole offset (the
 * offset would be 0 / overflow). Otherwise the DRAM Hole Address Register
 * (F1 0xf0) is written with the hole start and the hole offset.
 * Returns carry_over (per the visible callers' usage -- the return
 * statement itself is elided from this view; confirm).
 */
2198 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2201 	uint32_t carry_over;
2203 	uint32_t base, limit;
2208 	carry_over = (4*1024*1024) - hole_startk;
/* shift every node above node i up by carry_over */
2210 	for(ii=controllers - 1;ii>i;ii--) {
2211 		base  = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2212 		if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2215 		limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
/* DRAM routing must be identical on every node's F1 */
2216 		for(j = 0; j < controllers; j++) {
2217 			pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2218 			pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
2221 	limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2222 	for(j = 0; j < controllers; j++) {
2223 		pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2226 	base  = pci_read_config32(dev, 0x40 + (i << 3));
2227 	basek  = (base & 0xffff0000) >> 2;
2228 	if(basek == hole_startk) {
2229 		//don't need set memhole here, because hole off set will be 0, overflow
2230 		//so need to change base reg instead, new basek will be 4*1024*1024
2232 		base |= (4*1024*1024)<<2;
2233 		for(j = 0; j < controllers; j++) {
2234 			pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
2238 		hoist = /* hole start address */
2239 			((hole_startk << 10) & 0xff000000) +
2240 			/* hole address to memory controller address */
2241 			(((basek + carry_over) >> 6) & 0x0000ff00) +
2244 		pci_write_config32(dev, 0xf0, hoist);
/* Create the hardware memory hole below 4GB. The hole starts at
 * 4GB - HW_MEM_HOLE_SIZEK. With HW_MEM_HOLE_SIZE_AUTO_INC the start is
 * nudged down to the middle of the previous node's range if it would land
 * exactly on a node boundary. The node whose [base, limit) contains
 * hole_startk gets its memory hoisted above 4GB, then TOM/TOM2 are
 * reprogrammed from the new end of memory. Only one hole is created.
 */
2250 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2253 	uint32_t hole_startk;
2256 	hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
2258 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
2259 	//We need to double check if the hole_startk is valid, if it is equal to basek, we need to decrease it some
2261 	for(i=0; i<controllers; i++) {
2264 		base  = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
/* skip nodes without both read and write enable bits set */
2265 		if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2268 		base_k = (base & 0xffff0000) >> 2;
2269 		if(base_k == hole_startk) {
2270 			hole_startk -= (base_k - basek_pri)>>1; // decrease mem hole startk to make sure it is on middle of privous node
2271 			break; //only one hole
2277 	//find node index that need do set hole
2278 	for(i=0; i<controllers; i++) {
2279 		uint32_t base, limit;
2280 		unsigned base_k, limit_k;
2281 		base  = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2282 		if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2285 		limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
/* base/limit registers hold addr[39:24]; >>2 converts to KB */
2286 		base_k = (base & 0xffff0000) >> 2;
2287 		limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2288 		if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2290 			hoist_memory(controllers, ctrl, hole_startk, i);
2291 			end_k = memory_end_k(ctrl, controllers);
2292 			set_top_mem(end_k, hole_startk);
2293 			break; //only one hole
/* Polling budget for DRAM init completion (see loop below). */
2301 #define TIMEOUT_LOOPS 300000
2302 #if RAMINIT_SYSINFO == 1
/* Final DRAM bring-up across all controllers:
 *  1. die if no memory is mapped at all;
 *  2. start memory clocks (set DCH_MEMCLK_VALID where any MEMCLK_EN bit is
 *     set) and disable DRAM receivers;
 *  3. toggle DIMM reset via the board's memreset() hook;
 *  4. per controller: enable ChipKill in MCA NB Config when ECC + 128-bit
 *     mode, toggle DisDqsHys, then set DCL_DramInit to kick off init;
 *  5. poll DCL_DramInit clear (bounded by TIMEOUT_LOOPS), and on post-C0
 *     parts additionally wait for MemClrStatus/DramEnable before memory is
 *     touched;
 *  6. on post-E0 parts set up the hardware memory hole (DramHoleValid may
 *     only be set after MemClrStatus);
 *  7. without CAR, clear and cache the first CONFIG_LB_MEM_TOPK of memory.
 */
2303 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
2305 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2310 	/* Error if I don't have memory */
2311 	if (memory_end_k(ctrl, controllers) == 0) {
2312 		die("No memory\r\n");
2315 	/* Before enabling memory start the memory clocks */
2316 	for(i = 0; i < controllers; i++) {
2318 		if (!controller_present(ctrl + i))
2320 		dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2321 		if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2322 			dch |= DCH_MEMCLK_VALID;
2323 			pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2326 			/* Disable dram receivers */
2328 			dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2329 			dcl |= DCL_DisInRcvrs;
2330 			pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2334 	/* And if necessary toggle the the reset on the dimms by hand */
2335 	memreset(controllers, ctrl);
2337 	for(i = 0; i < controllers; i++) {
2339 		if (!controller_present(ctrl + i))
2341 		/* Skip everything if I don't have any memory on this controller */
2342 		dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2343 		if (!(dch & DCH_MEMCLK_VALID)) {
2347 		/* Toggle DisDqsHys to get it working */
2348 		dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2349 		if (dcl & DCL_DimmEccEn) {
2351 			print_spew("ECC enabled\r\n");
2352 			mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
/* ChipKill requires ECC and 128-bit (dual-channel) mode */
2354 			if (dcl & DCL_128BitEn) {
2355 				mnc |= MNC_CHIPKILL_EN;
2357 			pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2359 		dcl |= DCL_DisDqsHys;
2360 		pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2361 		dcl &= ~DCL_DisDqsHys;
2362 		dcl &= ~DCL_DLL_Disable;
2365 		dcl |= DCL_DramInit;
2366 		pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2369 	for(i = 0; i < controllers; i++) {
2371 		if (!controller_present(ctrl + i))
2373 		/* Skip everything if I don't have any memory on this controller */
2374 		dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2375 		if (!(dch & DCH_MEMCLK_VALID)) {
2379 		print_debug("Initializing memory: ");
/* poll until hardware clears DCL_DramInit, bounded by TIMEOUT_LOOPS */
2383 			dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2385 			if ((loops & 1023) == 0) {
2388 		} while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2389 		if (loops >= TIMEOUT_LOOPS) {
2390 			print_debug(" failed\r\n");
2394 		if (!is_cpu_pre_c0()) {
2395 			/* Wait until it is safe to touch memory */
2396 			dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2397 			pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2399 				dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2400 			} while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2403 		print_debug(" done\r\n");
2406 #if HW_MEM_HOLE_SIZEK != 0
2407 	 // init hw mem hole here
2408 	/* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2409 	if(!is_cpu_pre_e0())
2410 		set_hw_mem_hole(controllers, ctrl);
2413 	//FIXME add enable node interleaving here -- yhlu
2415 	1. check how many nodes we have , if not all has ram installed get out
2416 	2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
2417 	3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
2418 	4. if all ready enable node_interleaving in f1 0x40..... of every node
2419 	5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
2422 #if USE_DCACHE_RAM == 0
2423 	/* Make certain the first 1M of memory is intialized */
2424 	print_debug("Clearing initial memory region: ");
2426 	/* Use write combine caching while we setup the first 1M */
2427 	cache_lbmem(MTRR_TYPE_WRCOMB);
2429 	/* clear memory 1meg */
2430 	clear_memory((void *)0, CONFIG_LB_MEM_TOPK << 10);
2432 	/* The first 1M is now setup, use it */
2433 	cache_lbmem(MTRR_TYPE_WRBACK);
2435 	print_debug(" done\r\n");
/* Check whether every present controller has finished DRAM initialization.
 * For post-B3 (not pre-C0) parts a controller counts as initialized when
 * both DCL_MemClrStatus and DCL_DramEnable are set in DRAM Config Low.
 * Returns 1 when the set of initialized controllers matches the expected
 * mask (mask construction elided from this view), else presumably 0.
 */
2439 static int mem_inited(int controllers, const struct mem_controller *ctrl)
2444 	unsigned mask_inited = 0;
2446 	for(i = 0; i < controllers; i++) {
2448 		if (!controller_present(ctrl + i))
2452 		dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2454 		if (!is_cpu_pre_c0()) { // B3
2456 			if( (dcl & DCL_MemClrStatus)  && (dcl & DCL_DramEnable) ) {
2457 				mask_inited |= (1<<i);
2462 	if(mask == mask_inited) return 1;
2467 #if USE_DCACHE_RAM == 1
2468 static void set_sysinfo_in_ram(unsigned val)
2472 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
2476 struct mem_controller *ctrl;
2477 for(i=0;i<controllers; i++) {
2480 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2481 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2482 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2483 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2485 if(spd_addr == (void *)0) continue;
2487 for(j=0;j<DIMM_SOCKETS;j++) {
2488 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2489 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];