1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
6 #if K8_REV_F_SUPPORT == 1
10 #include <cpu/x86/mem.h>
11 #include <cpu/x86/cache.h>
12 #include <cpu/x86/mtrr.h>
17 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
18 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
21 #ifndef QRANK_DIMM_SUPPORT
22 #define QRANK_DIMM_SUPPORT 0
25 #if defined (__GNUC__)
26 static void hard_reset(void);
30 static void setup_resource_map(const unsigned int *register_values, int max)
33 // print_debug("setting up resource map....");
37 for(i = 0; i < max; i += 3) {
42 #if CONFIG_USE_PRINTK_IN_CAR
43 prink_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
45 print_debug_hex32(register_values[i]);
47 print_debug_hex32(register_values[i+2]);
51 dev = register_values[i] & ~0xfff;
52 where = register_values[i] & 0xfff;
53 reg = pci_read_config32(dev, where);
54 reg &= register_values[i+1];
55 reg |= register_values[i+2];
56 pci_write_config32(dev, where, reg);
58 reg = pci_read_config32(register_values[i]);
59 reg &= register_values[i+1];
60 reg |= register_values[i+2] & ~register_values[i+1];
61 pci_write_config32(register_values[i], reg);
64 // print_debug("done.\r\n");
68 static int controller_present(const struct mem_controller *ctrl)
70 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
/* Program this node's K8 DRAM controller (address map, chip selects,
 * bank mapping, timing, config and MCA/scrub registers) from a static
 * (PCI address, keep-mask, set-value) table, reporting progress on the
 * console.
 * NOTE(review): this chunk is heavily elided; structural lines of the
 * original function (braces, #else/#endif, declarations, the table's
 * closing brace) are not visible here.
 */
#if RAMINIT_SYSINFO==1
static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
static void sdram_set_registers(const struct mem_controller *ctrl)
	static const unsigned int register_values[] = {
	/* Careful: set limit registers before base registers, which contain the enables */
	/* DRAM Limit i Registers
	 * [ 2: 0] Destination Node ID
	 * [10: 8] Interleave select
	 *	   specifies the values of A[14:12] to use with interleave enable.
	 * [31:16] DRAM Limit Address i Bits 39-24
	 *	   This field defines the upper address bits of a 40 bit address
	 *	   that define the end of the DRAM region.
	 */
	PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
	PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
	PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
	PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
	PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
	PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
	PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
	/* DRAM Base i Registers
	 * [ 0: 0] Read Enable
	 * [ 1: 1] Write Enable
	 *	   0 = Writes Disabled
	 * [10: 8] Interleave Enable
	 *	   000 = No interleave
	 *	   001 = Interleave on A[12] (2 nodes)
	 *	   011 = Interleave on A[12] and A[14] (4 nodes)
	 *	   111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
	 * [31:16] DRAM Base Address i Bits 39-24
	 *	   This field defines the upper address bits of a 40-bit address
	 *	   that define the start of the DRAM region.
	 */
	PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
	/* DRAM CS Base Address i Registers
	 * [ 0: 0] Chip-Select Bank Enable
	 * [15: 9] Base Address (19-13)
	 *	   An optimization used when all DIMM are the same size...
	 * [31:21] Base Address (35-25)
	 *	   This field defines the top 11 addresses bit of a 40-bit
	 *	   address that define the memory address space.  These
	 *	   bits decode 32-MByte blocks of memory.
	 */
	PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
	/* DRAM CS Mask Address i Registers
	 * Select bits to exclude from comparison with the DRAM Base address register.
	 * [15: 9] Address Mask (19-13)
	 *	   Address to be excluded from the optimized case
	 * [29:21] Address Mask (33-25)
	 *	   The bits with an address mask of 1 are excluded from address comparison
	 */
	PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
	/* DRAM Bank Address Mapping Register
	 * Specify the memory module size
	 * 000 = 32Mbyte  (Rows = 12 & Col = 8)
	 * 001 = 64Mbyte  (Rows = 12 & Col = 9)
	 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
	 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
	 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
	 * 101 = 1Gbyte   (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
	 * 110 = 2Gbyte   (Rows = 14 & Col = 12)
	 */
	PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
	/* DRAM Timing Low Register
	 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
	 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
	 *	   0000 = 7 bus clocks
	 *	   0001 = 8 bus clocks
	 *	   1110 = 21 bus clocks
	 *	   1111 = 22 bus clocks
	 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
	 *	   0000 = 9 bus clocks
	 *	   0001 = 10 bus clocks
	 *	   1110 = 23 bus clocks
	 *	   1111 = 24 bus clocks
	 * [14:12] Trcd (Ras#-active to Cas#-read/write Delay)
	 * [18:16] Trrd (Ras# to Ras# Delay)
	 * [23:20] Tras (Minimum Ras# Active Time)
	 *	   0000 to 0100 = reserved
	 *	   0101 = 5 bus clocks
	 *	   1111 = 15 bus clocks
	 * [26:24] Trp (Row Precharge Time)
	 * [28:28] Twr (Write Recovery Time)
	 */
	PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
	/* DRAM Timing High Register
	 * [ 0: 0] Twtr (Write to Read Delay)
	 * [ 6: 4] Trwt (Read to Write Delay)
	 * [12: 8] Tref (Refresh Rate)
	 *	   00000 = 100Mhz 4K rows
	 *	   00001 = 133Mhz 4K rows
	 *	   00010 = 166Mhz 4K rows
	 *	   00011 = 200Mhz 4K rows
	 *	   01000 = 100Mhz 8K/16K rows
	 *	   01001 = 133Mhz 8K/16K rows
	 *	   01010 = 166Mhz 8K/16K rows
	 *	   01011 = 200Mhz 8K/16K rows
	 * [22:20] Twcl (Write CAS Latency)
	 *	   000 = 1 Mem clock after CAS# (Unbuffered Dimms)
	 *	   001 = 2 Mem clocks after CAS# (Registered Dimms)
	 */
	PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
	/* DRAM Config Low Register
	 * [ 0: 0] DLL Disable
	 * [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
	 *	   0 = Enable DQS input filter
	 *	   1 = Disable DQS input filtering
	 *	   0 = Initialization done or not yet started.
	 *	   1 = Initiate DRAM initialization sequence
	 * [ 9: 9] SO-Dimm Enable
	 *	   1 = SO-Dimms present
	 *	   0 = DRAM not enabled
	 *	   1 = DRAM initialized and enabled
	 * [11:11] Memory Clear Status
	 *	   0 = Memory Clear function has not completed
	 *	   1 = Memory Clear function has completed
	 * [12:12] Exit Self-Refresh
	 *	   0 = Exit from self-refresh done or not yet started
	 *	   1 = DRAM exiting from self refresh
	 * [13:13] Self-Refresh Status
	 *	   0 = Normal Operation
	 *	   1 = Self-refresh mode active
	 * [15:14] Read/Write Queue Bypass Count
	 * [16:16] 128-bit/64-Bit
	 *	   0 = 64bit Interface to DRAM
	 *	   1 = 128bit Interface to DRAM
	 * [17:17] DIMM ECC Enable
	 *	   0 = Some DIMMs do not have ECC
	 *	   1 = ALL DIMMS have ECC bits
	 * [18:18] UnBuffered DIMMs
	 *	   1 = Unbuffered DIMMS
	 * [19:19] Enable 32-Byte Granularity
	 *	   0 = Optimize for 64byte bursts
	 *	   1 = Optimize for 32byte bursts
	 * [20:20] DIMM 0 is x4
	 * [21:21] DIMM 1 is x4
	 * [22:22] DIMM 2 is x4
	 * [23:23] DIMM 3 is x4
	 *	   1 = x4 DIMM present
	 * [24:24] Disable DRAM Receivers
	 *	   0 = Receivers enabled
	 *	   1 = Receivers disabled
	 *	   000 = Arbiter's choice is always respected
	 *	   001 = Oldest entry in DCQ can be bypassed 1 time
	 *	   010 = Oldest entry in DCQ can be bypassed 2 times
	 *	   011 = Oldest entry in DCQ can be bypassed 3 times
	 *	   100 = Oldest entry in DCQ can be bypassed 4 times
	 *	   101 = Oldest entry in DCQ can be bypassed 5 times
	 *	   110 = Oldest entry in DCQ can be bypassed 6 times
	 *	   111 = Oldest entry in DCQ can be bypassed 7 times
	 */
	PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
	(0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
	(1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
	(2 << 14)|(0 << 13)|(0 << 12)|
	(0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
	(0 << 3) |(0 << 1) |(0 << 0),
	/* DRAM Config High Register
	 * [ 0: 3] Maximum Asynchronous Latency
	 * [11: 8] Read Preamble
	 * [18:16] Idle Cycle Limit
	 * [19:19] Dynamic Idle Cycle Center Enable
	 *	   0 = Use Idle Cycle Limit
	 *	   1 = Generate a dynamic Idle cycle limit
	 * [22:20] DRAM MEMCLK Frequency
	 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
	 *	   0 = Disable MemClks
	 * [26:26] Memory Clock 0 Enable
	 * [27:27] Memory Clock 1 Enable
	 * [28:28] Memory Clock 2 Enable
	 * [29:29] Memory Clock 3 Enable
	 */
	PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
	(0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
	(0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
	/* DRAM Delay Line Register
	 * Adjust the skew of the input DQS strobe relative to DATA
	 * [23:16] Delay Line Adjust
	 *	   Adjusts the DLL derived PDL delay by one or more delay stages
	 *	   in either the faster or slower direction.
	 * [24:24] Adjust Slower
	 *	   1 = Adj is used to increase the PDL delay
	 * [25:25] Adjust Faster
	 *	   1 = Adj is used to decrease the PDL delay
	 */
	PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
	/* MCA NB Status Low reg */
	PCI_ADDR(0, 0x18, 3, 0x48), 0x00f00000, 0x00000000,
	/* MCA NB Status high reg */
	PCI_ADDR(0, 0x18, 3, 0x4c), 0x01801e8c, 0x00000000,
	/* MCA NB address Low reg */
	PCI_ADDR(0, 0x18, 3, 0x50), 0x00000007, 0x00000000,
	/* MCA NB address high reg */
	PCI_ADDR(0, 0x18, 3, 0x54), 0xffffff00, 0x00000000,
	/* DRAM Scrub Control Register
	 * [ 4: 0] DRAM Scrub Rate
	 * [12: 8] L2 Scrub Rate
	 * [20:16] Dcache Scrub
	 *	   00000 = Do not scrub
	 *	   All Others = Reserved
	 */
	PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
	/* DRAM Scrub Address Low Register
	 * [ 0: 0] DRAM Scrubber Redirect Enable
	 *	   1 = Scrubber Corrects errors found in normal operation
	 * [31: 6] DRAM Scrub Address 31-6
	 */
	PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
	/* DRAM Scrub Address High Register
	 * [ 7: 0] DRAM Scrub Address 39-32
	 */
	PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
	/* Skip nodes whose memory controller does not answer. */
	if (!controller_present(ctrl)) {
//		print_debug("No memory controller present\r\n");
	print_spew("setting up CPU");
	print_spew_hex8(ctrl->node_id);
	print_spew(" northbridge registers\r\n");
	max = ARRAY_SIZE(register_values);
	for(i = 0; i < max; i += 3) {
#if CONFIG_USE_PRINTK_IN_CAR
		/* FIXME: "prink_debug" is a misspelling of printk_debug and
		 * fails to build when CONFIG_USE_PRINTK_IN_CAR is enabled. */
		prink_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
		print_spew_hex32(register_values[i]);
		print_spew_hex32(register_values[i+2]);
		/* Rebase the table's node-0 device onto this controller's f0. */
		dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
		where = register_values[i] & 0xfff;
		reg = pci_read_config32(dev, where);
		reg &= register_values[i+1];
		reg |= register_values[i+2];
		pci_write_config32(dev, where, reg);
		reg = pci_read_config32(register_values[i]);
		reg &= register_values[i+1];
		reg |= register_values[i+2];
		pci_write_config32(register_values[i], reg);
	print_spew("done.\r\n");
592 static void hw_enable_ecc(const struct mem_controller *ctrl)
595 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
596 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
597 dcl &= ~DCL_DimmEccEn;
598 if (nbcap & NBCAP_ECC) {
599 dcl |= DCL_DimmEccEn;
601 if (HAVE_OPTION_TABLE &&
602 read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
603 dcl &= ~DCL_DimmEccEn;
605 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
608 static int is_dual_channel(const struct mem_controller *ctrl)
611 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
612 return dcl & DCL_128BitEn;
615 static int is_opteron(const struct mem_controller *ctrl)
617 /* Test to see if I am an Opteron.
618 * FIXME Socket 939 based Athlon64 have dual channel capability,
619 * too, so we need a better test for Opterons
621 #warning "FIXME: Implement a better test for Opterons"
623 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
624 return !!(nbcap & NBCAP_128Bit);
627 static int is_registered(const struct mem_controller *ctrl)
629 /* Test to see if we are dealing with registered SDRAM.
630 * If we are not registered we are unbuffered.
631 * This function must be called after spd_handle_unbuffered_dimms.
634 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
635 return !(dcl & DCL_UnBufDimm);
#if QRANK_DIMM_SUPPORT == 1
/* Decode a DIMM's geometry from its SPD EEPROM at SMBus address 'device'.
 * Accumulates log2 of the size in bits per side; a bad SPD value calls
 * die(), an SMBus error takes the hw_err path.
 * NOTE(review): chunk is elided — declarations, braces and parts of the
 * error paths of the original function are not visible here.
 */
static struct dimm_size spd_get_dimm_size(unsigned device)
	/* Calculate the log base 2 size of a DIMM in bits */
#if QRANK_DIMM_SUPPORT == 1
	/* Note it might be easier to use byte 31 here, it has the DIMM size as
	 * a multiple of 4MB.  The way we do it now we can size both
	 * sides of an asymmetric dimm.
	 */
	value = spd_read_byte(device, 3);	/* rows */
	if (value < 0) goto hw_err;
	if ((value & 0xf) == 0) goto val_err;
	sz.side1 += value & 0xf;
	sz.rows = value & 0xf;
	value = spd_read_byte(device, 4);	/* columns */
	if (value < 0) goto hw_err;
	if ((value & 0xf) == 0) goto val_err;
	sz.side1 += value & 0xf;
	sz.col = value & 0xf;
	value = spd_read_byte(device, 17);	/* banks */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;
	sz.side1 += log2(value & 0xff);
	/* Get the module data width and convert it to a power of two */
	value = spd_read_byte(device, 7);	/* (high byte) */
	if (value < 0) goto hw_err;
	low = spd_read_byte(device, 6);	/* (low byte) */
	if (low < 0) goto hw_err;
	value = value | (low & 0xff);
	/* Only 64-bit (no ECC) and 72-bit (ECC) modules are valid. */
	if ((value != 72) && (value != 64)) goto val_err;
	sz.side1 += log2(value);
	value = spd_read_byte(device, 5);	/* number of physical banks */
	if (value < 0) goto hw_err;
	if (value == 1) goto out;
	if ((value != 2) && (value != 4 )) {
#if QRANK_DIMM_SUPPORT == 1
	/* Start with the symmetrical case */
	value = spd_read_byte(device, 3);	/* rows */
	if (value < 0) goto hw_err;
	if ((value & 0xf0) == 0) goto out;	/* If symmetrical we are done */
	sz.side2 -= (value & 0x0f);		/* Subtract out rows on side 1 */
	sz.side2 += ((value >> 4) & 0x0f);	/* Add in rows on side 2 */
	value = spd_read_byte(device, 4);	/* columns */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;
	sz.side2 -= (value & 0x0f);		/* Subtract out columns on side 1 */
	sz.side2 += ((value >> 4) & 0x0f);	/* Add in columns on side 2 */
	die("Bad SPD value\r\n");
	/* If a hw_error occurs, report that I have no memory */
#if QRANK_DIMM_SUPPORT == 1
/* Program the chip-select base registers for DIMM 'index' from the
 * decoded SPD geometry in 'sz', then enable its memory clocks.
 * NOTE(review): chunk is elided — closing braces and some statements of
 * the original function are not visible here.
 */
static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
	uint32_t base0, base1;
	if (sz.side1 != sz.side2) {
	/* For each base register.
	 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
	 * The initial dimm size is in bits.
	 * Set the base enable bit0.
	 */
	/* Make certain side1 of the dimm is at least 32MB */
	if (sz.side1 >= (25 +3)) {
		base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
	/* Make certain side2 of the dimm is at least 32MB */
	if (sz.side2 >= (25 + 3)) {
		base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
	/* Double the size if we are using dual channel memory */
	if (is_dual_channel(ctrl)) {
		base0 = (base0 << 1) | (base0 & 1);
		base1 = (base1 << 1) | (base1 & 1);
	/* Clear the reserved bits */
	base0 &= ~0x001ffffe;
	base1 &= ~0x001ffffe;
	/* Set the appropriate DIMM base address register */
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
#if QRANK_DIMM_SUPPORT == 1
		/* Quad-rank DIMMs mirror into the upper chip selects. */
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
	/* Enable the memory clocks for this DIMM */
	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
	dch |= DCH_MEMCLK_EN0 << index;
#if QRANK_DIMM_SUPPORT == 1
		dch |= DCH_MEMCLK_EN0 << (index + 2);
	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program this DIMM's 4-bit field of the DRAM Bank Address Mapping
 * register from the decoded row/column counts; pre-D0 CPUs use a linear
 * encoding, later revisions use the cs_map_aa lookup table.
 * NOTE(review): chunk is elided — table entries, braces and the else
 * arms of the original function are not visible here.
 */
static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
	static const unsigned cs_map_aa[] = {
		/* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
	map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
	map &= ~(0xf << (index * 4));
#if QRANK_DIMM_SUPPORT == 1
		map &= ~(0xf << ( (index + 2) * 4));
	/* Make certain side1 of the dimm is at least 32MB */
	if (sz.side1 >= (25 +3)) {
		if(is_cpu_pre_d0()) {
			map |= (sz.side1 - (25 + 3)) << (index *4);
#if QRANK_DIMM_SUPPORT == 1
				map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
			/* D0 and later: look up the encoding by (rows, cols). */
			map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
#if QRANK_DIMM_SUPPORT == 1
				map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
	pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/* Size every populated DIMM via SPD and program its chip-select base and
 * bank-mapping registers.  Returns -1 on an SPD read error.
 * NOTE(review): chunk is elided — declarations, braces and the final
 * return of the original function are not visible here.
 */
static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
	for(i = 0; i < DIMM_SOCKETS; i++) {
		/* Skip sockets not present in the mask. */
		if (!(dimm_mask & (1 << i))) {
		sz = spd_get_dimm_size(ctrl->channel0[i]);
			return -1; /* Report SPD error */
		set_dimm_size(ctrl, sz, i);
		set_dimm_map (ctrl, sz, i);
/* Program the DRAM base/limit pair for this node (function 1, registers
 * 0x40/0x44 + node*8) on every node so accesses in [base_k, limit_k) are
 * routed to ctrl->node_id.  base_k/limit_k are in KBytes; the registers
 * hold address bits 39-24, hence the << 2.
 * NOTE(review): chunk is elided — some declarations and braces of the
 * original function are not visible here.
 */
static void route_dram_accesses(const struct mem_controller *ctrl,
	unsigned long base_k, unsigned long limit_k)
	/* Route the addresses to the controller node */
	unsigned limit_reg, base_reg;
	node_id = ctrl->node_id;
	index = (node_id << 3);
	limit = (limit_k << 2);
	/* No interleave; destination is this node. */
	limit |= ( 0 << 8) | (node_id << 0);
	base = (base_k << 2);
	/* Read enable (bit 0) and write enable (bit 1). */
	base |= (0 << 8) | (1<<1) | (1<<0);
	limit_reg = 0x44 + index;
	base_reg = 0x40 + index;
	for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
		pci_write_config32(device, limit_reg, limit);
		pci_write_config32(device, base_reg, base);
/* Write the TOP_MEM (and, above 4GB, TOP_MEM2) MSRs from the total
 * memory size tom_k (in KBytes).
 * NOTE(review): chunk is elided — the msr declaration, braces and the
 * final wrmsr of the original function are not visible here.
 */
static void set_top_mem(unsigned tom_k, unsigned hole_startk)
	/* Error if I don't have memory */
	/* Report the amount of memory. */
	print_spew("RAM: 0x");
	print_spew_hex32(tom_k);
	print_spew(" KB\r\n");
	/* Now set top of memory */
	if(tom_k > (4*1024*1024)) {
		/* KBytes -> MSR format: low 22 bits shift into lo,
		 * the rest into hi. */
		msr.lo = (tom_k & 0x003fffff) << 10;
		msr.hi = (tom_k & 0xffc00000) >> 22;
		wrmsr(TOP_MEM2, msr);
	/* Leave a 64M hole between TOP_MEM and TOP_MEM2
	 * so I can see my rom chip and other I/O devices.
	 */
	if (tom_k >= 0x003f0000) {
#if HW_MEM_HOLE_SIZEK != 0
		if(hole_startk != 0) {
	msr.lo = (tom_k & 0x003fffff) << 10;
	msr.hi = (tom_k & 0xffc00000) >> 22;
/* Interleave the enabled chip selects when they are all the same size and
 * their count is a power of two (2, 4 or 8).  Returns the total memory
 * size in KBytes, with the caller falling back to sequential ordering on
 * failure.
 * NOTE(review): chunk is elided — declarations, braces and several
 * statements of the original function are not visible here.
 */
static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
	/* Shift (relative to bit 4) of the lowest csbase bit that varies
	 * between chip selects, indexed by cs_mode, for pre-D0 CPUs. */
	static const uint8_t csbase_low_shift[] = {
	/* 128MB */ (14 - 4),
	/* 256MB */ (15 - 4),
	/* 512MB */ (15 - 4),
	/* Same table for D0 and later revisions. */
	static const uint8_t csbase_low_d0_shift[] = {
	/* 128MB */ (14 - 4),
	/* 128MB */ (15 - 4),
	/* 256MB */ (15 - 4),
	/* 512MB */ (15 - 4),
	/* 256MB */ (16 - 4),
	/* 512MB */ (16 - 4),
	/* cs_base_high is not changed */
	int chip_selects, index;
	unsigned common_size;
	unsigned common_cs_mode;
	uint32_t csbase, csmask;
	/* See if all of the memory chip selects are the same size
	 * and if so count them.
	 */
	for(index = 0; index < 8; index++) {
		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
		if (common_size == 0) {
		/* The size differed, fail */
		if (common_size != size) {
		value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
		cs_mode =( value >> ((index>>1)*4)) & 0xf;
		if(cs_mode == 0 ) continue;
		if(common_cs_mode == 0) {
			common_cs_mode = cs_mode;
		/* The cs_mode differed, fail */
		if(common_cs_mode != cs_mode) {
	/* Chip selects can only be interleaved when there is
	 * more than one and there is a power of two of them.
	 */
	bits = log2(chip_selects);
	if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
	/* Find the bits of csbase that we need to interleave on */
	if(is_cpu_pre_d0()){
		csbase_inc = 1 << csbase_low_shift[common_cs_mode];
		if(is_dual_channel(ctrl)) {
			/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
			if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
//				print_debug("8 4GB chip selects cannot be interleaved\r\n");
		csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
		if(is_dual_channel(ctrl)) {
			if( (bits==3) && (common_cs_mode > 8)) {
//				print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
	/* Compute the initial values for csbase and csmask.
	 * In csbase just set the enable bit and the base to zero.
	 * In csmask set the mask bits for the size and page level interleave.
	 */
	csmask = (((common_size << bits) - 1) << 21);
	csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
	for(index = 0; index < 8; index++) {
		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
		/* Is it enabled? */
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
		pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
		csbase += csbase_inc;
	print_spew("Interleaved\r\n");
	/* Return the memory size in K */
	return common_size << (15 + bits);
/* Pack the enabled chip selects contiguously from address zero, largest
 * first, rewriting their base and mask registers.  Returns the total
 * memory size in KBytes.
 * NOTE(review): chunk is elided — the selection loop's braces and some
 * statements of the original function are not visible here.
 */
static unsigned long order_chip_selects(const struct mem_controller *ctrl)
	/* Remember which registers we have used in the high 8 bits of tom */
	/* Find the largest remaining candidate */
	unsigned index, canidate;
	uint32_t csbase, csmask;
	for(index = 0; index < 8; index++) {
		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
		/* Is it enabled? */
		/* Is it greater? */
		if (value <= csbase) {
		/* Has it already been selected */
		if (tom & (1 << (index + 24))) {
		/* I have a new candidate */
	/* See if I have found a new candidate */
	/* Remember the dimm size */
	size = csbase >> 21;
	/* Remember I have used this register */
	tom |= (1 << (canidate + 24));
	/* Recompute the cs base register value */
	csbase = (tom << 21) | 1;
	/* Increment the top of memory */
	/* Compute the memory mask */
	csmask = ((size -1) << 21);
	csmask |= 0xfe00;		/* For now don't optimize */
	/* Write the new base register */
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
	/* Write the new mask register */
	pci_write_config32(ctrl->f2, DRAM_CSMASK + (canidate << 2), csmask);
	/* Return the memory size in K */
	return (tom & ~0xff000000) << 15;
/* Scan the function-1 DRAM base/limit pairs of nodes [0, max_node_id)
 * and return the highest limit (in KBytes) among enabled entries.
 * NOTE(review): chunk is elided — declarations, braces and the final
 * return of the original function are not visible here.
 */
unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
	/* Find the last memory address used */
	for(node_id = 0; node_id < max_node_id; node_id++) {
		uint32_t limit, base;
		index = node_id << 3;
		base = pci_read_config32(ctrl->f1, 0x40 + index);
		/* Only look at the limit if the base is enabled
		 * (read enable bit 0 and write enable bit 1 both set). */
		if ((base & 3) == 3) {
			limit = pci_read_config32(ctrl->f1, 0x44 + index);
			/* Limit holds address bits 39-24; round the
			 * inclusive limit up and convert to KBytes. */
			end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
/* Lay out this node's chip selects (interleaved when the CMOS option
 * allows and interleaving succeeds, otherwise sequentially), then route
 * the resulting range to this node and update top-of-memory.
 * NOTE(review): chunk is elided — the else arm and braces of the
 * original function are not visible here.
 */
static void order_dimms(const struct mem_controller *ctrl)
	unsigned long tom_k, base_k;
	if ((!HAVE_OPTION_TABLE) ||
	    read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
		tom_k = interleave_chip_selects(ctrl);
		print_debug("Interleaving disabled\r\n");
		tom_k = order_chip_selects(ctrl);
	/* Compute the memory base address */
	base_k = memory_end_k(ctrl, ctrl->node_id);
	route_dram_accesses(ctrl, base_k, tom_k);
	set_top_mem(tom_k, 0);
1165 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1167 print_debug("disabling dimm");
1168 print_debug_hex8(index);
1169 print_debug("\r\n");
1170 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1171 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1172 dimm_mask &= ~(1 << index);
/* Classify the populated DIMMs as registered or unbuffered from SPD
 * byte 21, refuse a mixed configuration, and program DCL_UnBufDimm
 * (plus DCL_UpperCSMap on socket-939 parts) accordingly.
 * NOTE(review): chunk is elided — declarations, braces, counters and
 * the final return of the original function are not visible here.
 */
static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, long dimm_mask)
	int has_dualch = is_opteron(ctrl);
	for(i = 0; (i < DIMM_SOCKETS); i++) {
		if (!(dimm_mask & (1 << i))) {
		/* SPD byte 21: SDRAM module attributes. */
		value = spd_read_byte(ctrl->channel0[i], 21);
		/* Registered dimm ? */
		if (value & (1 << 1)) {
		/* Otherwise it must be an unbuffered dimm */
	if (unbuffered && registered) {
		die("Mixed buffered and registered dimms not supported");
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	dcl &= ~DCL_UnBufDimm;
	if ((has_dualch) && (!is_cpu_pre_d0())) {
		dcl |= DCL_UnBufDimm; /* set DCL_DualDIMMen too? */
		/* set DCL_En2T if you have non-equal DDR mem types! */
		if ((cpuid_eax(1) & 0x30) == 0x30) {
			/* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
			dcl |= DCL_UpperCSMap;
		dcl |= DCL_UnBufDimm;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
	if (is_registered(ctrl)) {
		print_debug("Registered\r\n");
		print_debug("Unbuffered\r\n");
/* Probe SPD byte 2 (memory type) on both channels of every socket and
 * build a presence bitmask: bit i for channel 0, bit i+DIMM_SOCKETS for
 * channel 1.
 * NOTE(review): chunk is elided — declarations, the type checks, braces
 * and the final return of the original function are not visible here.
 */
static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
	for(i = 0; i < DIMM_SOCKETS; i++) {
		device = ctrl->channel0[i];
		byte = spd_read_byte(ctrl->channel0[i], 2);  /* Type */
			dimm_mask |= (1 << i);
		device = ctrl->channel1[i];
			byte = spd_read_byte(ctrl->channel1[i], 2);
				dimm_mask |= (1 << (i + DIMM_SOCKETS));
/* Enable 128-bit (dual channel) mode when the CPU supports it and every
 * populated socket holds an identical pair of DIMMs (verified byte-by-byte
 * over the SPD addresses listed below); otherwise fall back to single
 * channel and mask off the channel-1 DIMMs.
 * NOTE(review): chunk is elided — declarations, braces, the
 * single_channel label and returns of the original function are not
 * visible here.
 */
static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
	/* SPD addresses to verify are identical */
	static const uint8_t addresses[] = {
		2,	/* Type should be DDR SDRAM */
		3,	/* *Row addresses */
		4,	/* *Column addresses */
		5,	/* *Physical Banks */
		6,	/* *Module Data Width low */
		7,	/* *Module Data Width high */
		9,	/* *Cycle time at highest CAS Latency CL=X */
		11,	/* *SDRAM Type */
		13,	/* *SDRAM Width */
		17,	/* *Logical Banks */
		18,	/* *Supported CAS Latencies */
		21,	/* *SDRAM Module Attributes */
		23,	/* *Cycle time at CAS Latency (CLX - 0.5) */
		26,	/* *Cycle time at CAS Latency (CLX - 1.0) */
		27,	/* *tRP Row precharge time */
		28,	/* *Minimum Row Active to Row Active Delay (tRRD) */
		29,	/* *tRCD RAS to CAS */
		30,	/* *tRAS Activate to Precharge */
		41,	/* *Minimum Active to Active/Auto Refresh Time(Trc) */
		42,	/* *Minimum Auto Refresh Command Time(Trfc) */
	/* If the dimms are not in pairs do not do dual channels */
	if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
		((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
		goto single_channel;
	/* If the cpu is not capable of doing dual channels don't do dual channels */
	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
	if (!(nbcap & NBCAP_128Bit)) {
		goto single_channel;
	for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
		unsigned device0, device1;
		/* If I don't have a dimm skip this one */
		if (!(dimm_mask & (1 << i))) {
		device0 = ctrl->channel0[i];
		device1 = ctrl->channel1[i];
		for(j = 0; j < ARRAY_SIZE(addresses); j++) {
			addr = addresses[j];
			value0 = spd_read_byte(device0, addr);
			value1 = spd_read_byte(device1, addr);
			/* Any mismatch disqualifies dual channel. */
			if (value0 != value1) {
				goto single_channel;
	print_spew("Enabling dual channel memory\r\n");
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	dcl &= ~DCL_32ByteEn;
	dcl |= DCL_128BitEn;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
	/* Single channel: drop the channel-1 half of the mask. */
	dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
	/* NOTE(review): struct mem_param fields — the struct's opening and
	 * closing lines are not visible in this chunk. */
	uint8_t divisor; /* In 1/2 ns increments */
	uint32_t dch_memclk;		/* value for DRAM Config High MEMCLK field */
	uint16_t dch_tref4k, dch_tref8k;	/* Tref encodings for 4K and 8K/16K row parts */
	uint8_t dtl_trwt[3][3]; /* first index is CAS_LAT 2/2.5/3 and 128/registered64/64 */
	uint8_t rdpreamble[4]; /* 0 is for registered, 1 for 1-2 DIMMS, 2 and 3 for 3 or 4 unreg dimm slots */
/* Select the fastest memory-speed parameter set whose cycle time still
 * satisfies min_cycle_time (SPD encoding: high nibble = ns, low nibble =
 * tenths of ns).  Dies if even the slowest entry is too fast.
 * NOTE(review): chunk is elided — entry braces, .cycle_time/.tRC/.tRFC
 * fields, the sentinel entry and the final return of the original
 * function are not visible here.
 */
static const struct mem_param *get_mem_param(unsigned min_cycle_time)
	static const struct mem_param speed[] = {
		.name = "100Mhz\r\n",
		.divisor = (10 <<1),
		.dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
		.dch_tref4k = DTH_TREF_100MHZ_4K,
		.dch_tref8k = DTH_TREF_100MHZ_8K,
		.dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
		.name = "133Mhz\r\n",
		.divisor = (7<<1)+1,
		.dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
		.dch_tref4k = DTH_TREF_133MHZ_4K,
		.dch_tref8k = DTH_TREF_133MHZ_8K,
		.dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
		.name = "166Mhz\r\n",
		.dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
		.dch_tref4k = DTH_TREF_166MHZ_4K,
		.dch_tref8k = DTH_TREF_166MHZ_8K,
		.dtl_trwt = { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
		.rdpreamble = { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
		.name = "200Mhz\r\n",
		.dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
		.dch_tref4k = DTH_TREF_200MHZ_4K,
		.dch_tref8k = DTH_TREF_200MHZ_8K,
		.dtl_trwt = { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
	const struct mem_param *param;
	/* Walk until the next (slower) entry no longer meets min_cycle_time. */
	for(param = &speed[0]; param->cycle_time ; param++) {
		if (min_cycle_time > (param+1)->cycle_time) {
	if (!param->cycle_time) {
		/* FIXME: message typo — should read "min_cycle_time too low" */
		die("min_cycle_time to low");
	print_spew(param->name);
#ifdef DRAM_MIN_CYCLE_TIME
	print_debug(param->name);
1428 struct spd_set_memclk_result {
1429 const struct mem_param *param;
/* spd_set_memclk() - choose the fastest memory clock and lowest CAS
 * latency supported by BOTH the northbridge and every populated DIMM,
 * disable DIMMs that cannot keep up, and program DRAM Config High
 * (memclk) and DRAM Timing Low (CAS latency) accordingly.
 * Returns the chosen mem_param and the surviving dimm_mask; on an
 * unrecoverable SPD read error returns param == NULL, dimm_mask == -1.
 * NOTE(review): this is an elided listing — several closing braces,
 * declarations and error-path lines are not shown.
 */
1432 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1434 /* Compute the minimum cycle time for these dimms */
1435 struct spd_set_memclk_result result;
1436 unsigned min_cycle_time, min_latency, bios_cycle_time;
/* SPD cycle-time bytes walked in step with the CAS-latency loop below
 * (JEDEC DDR SPD bytes 26, 23, 9 — lowest supported latency first). */
1440 static const uint8_t latency_indicies[] = { 26, 23, 9 };
/* Cycle times in SPD encoding (upper nibble = ns, lower nibble = 0.1ns),
 * indexed by the northbridge capability memclk field. */
1441 static const unsigned char min_cycle_times[] = {
1442 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1443 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1444 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1445 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
/* Start from the fastest clock the memory controller itself supports. */
1448 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1450 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
/* Let the CMOS "max_mem_clock" option further limit the clock. */
1451 bios_cycle_time = min_cycle_times[
1452 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1453 if (HAVE_OPTION_TABLE && bios_cycle_time > min_cycle_time) {
1454 min_cycle_time = bios_cycle_time;
1458 /* Compute the least latency with the fastest clock supported
1459 * by both the memory controller and the dimms.
1461 for(i = 0; i < DIMM_SOCKETS; i++) {
1462 int new_cycle_time, new_latency;
/* Skip sockets already removed from the mask. */
1467 if (!(dimm_mask & (1 << i))) {
1471 /* First find the supported CAS latencies
1472 * Byte 18 for DDR SDRAM is interpreted:
1473 * bit 0 == CAS Latency = 1.0
1474 * bit 1 == CAS Latency = 1.5
1475 * bit 2 == CAS Latency = 2.0
1476 * bit 3 == CAS Latency = 2.5
1477 * bit 4 == CAS Latency = 3.0
1478 * bit 5 == CAS Latency = 3.5
/* Worst case defaults (10ns); refined below from SPD data. */
1482 new_cycle_time = 0xa0;
1485 latencies = spd_read_byte(ctrl->channel0[i], 18);
1486 if (latencies <= 0) continue;
1488 /* Compute the lowest cas latency supported */
1489 latency = log2(latencies) -2;
1491 /* Loop through and find a fast clock with a low latency */
1492 for(index = 0; index < 3; index++, latency++) {
/* Only CAS 2, 2.5 and 3 are representable in DRAM Timing Low. */
1494 if ((latency < 2) || (latency > 4) ||
1495 (!(latencies & (1 << latency)))) {
1498 spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1499 if (spd_value < 0) {
1503 /* Only increase the latency if we decrease the clock */
1504 if ((spd_value >= min_cycle_time) && (spd_value < new_cycle_time)) {
1505 new_cycle_time = spd_value;
1506 new_latency = latency;
/* No usable CAS latency found for this DIMM. */
1509 if (new_latency > 4){
1512 /* Does min_cycle_time need to be increased? */
1513 if (new_cycle_time > min_cycle_time) {
1514 min_cycle_time = new_cycle_time;
1516 /* Does min_latency need to be increased? */
1517 if (new_latency > min_latency) {
1518 min_latency = new_latency;
1521 /* Make a second pass through the dimms and disable
1522 * any that cannot support the selected memclk and cas latency.
1525 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1530 if (!(dimm_mask & (1 << i))) {
1533 latencies = spd_read_byte(ctrl->channel0[i], 18);
1534 if (latencies < 0) goto hw_error;
1535 if (latencies == 0) {
1539 /* Compute the lowest cas latency supported */
1540 latency = log2(latencies) -2;
1542 /* Walk through searching for the selected latency */
1543 for(index = 0; index < 3; index++, latency++) {
1544 if (!(latencies & (1 << latency))) {
1547 if (latency == min_latency)
1550 /* If I can't find the latency or my index is bad error */
1551 if ((latency != min_latency) || (index >= 3)) {
1555 /* Read the min_cycle_time for this latency */
1556 spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1557 if (spd_value < 0) goto hw_error;
1559 /* All is good if the selected clock speed
1560 * is what I need or slower.
1562 if (spd_value <= min_cycle_time) {
1565 /* Otherwise I have an error, disable the dimm */
1567 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1570 //down speed for full load 4 rank support
1571 #if QRANK_DIMM_SUPPORT
/* Both sockets populated on both channels: check ranks (SPD byte 5)
 * and derate DDR400 -> DDR333 under full quad-rank load. */
1572 if(dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1574 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1576 if (!(dimm_mask & (1 << i))) {
1579 val = spd_read_byte(ctrl->channel0[i], 5);
1586 if(min_cycle_time <= 0x50 ) {
1587 min_cycle_time = 0x60;
1594 /* Now that I know the minimum cycle time lookup the memory parameters */
1595 result.param = get_mem_param(min_cycle_time);
1597 /* Update DRAM Config High with our selected memory speed */
1598 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1599 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1601 /* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
1602 if(!is_cpu_pre_e0()) {
1603 if(min_cycle_time==0x50) {
1609 value |= result.param->dch_memclk;
1610 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
/* Map CAS 2 / 2.5 / 3 to the DTL_TCL register encoding. */
1612 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1613 /* Update DRAM Timing Low with our selected cas latency */
1614 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1615 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1616 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1617 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1619 result.dimm_mask = dimm_mask;
/* hw_error path: signal unrecoverable SPD failure to the caller. */
1622 result.param = (const struct mem_param *)0;
1623 result.dimm_mask = -1;
/* update_dimm_Trc() - program the Trc field of DRAM Timing Low from
 * SPD byte 41 (tRC) of DIMM i.  Converts the SPD value to clocks
 * (rounding up against param->divisor), raises it to DTL_TRC_MIN, and
 * never lowers a value already programmed, so the register ends up
 * holding the max over all DIMMs.  Returns -1 on SPD read error.
 * NOTE(review): the bodies of the value==0/0xff and over-MAX branches
 * are elided in this listing — confirm their behavior in full source.
 */
1628 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1630 unsigned clocks, old_clocks;
1633 value = spd_read_byte(ctrl->channel0[i], 41);
1634 if (value < 0) return -1;
1635 if ((value == 0) || (value == 0xff)) {
/* value<<1 converts to half-ns units to match param->divisor; round up. */
1638 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1639 if (clocks < DTL_TRC_MIN) {
1640 clocks = DTL_TRC_MIN;
1642 if (clocks > DTL_TRC_MAX) {
1646 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1647 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
/* Keep the slower (larger) of old vs. new so all DIMMs are satisfied. */
1648 if (old_clocks > clocks) {
1649 clocks = old_clocks;
1651 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1652 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1653 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Trfc() - program the Trfc field of DRAM Timing Low from
 * SPD byte 42 (tRFC) of DIMM i, falling back to param->tRFC when the
 * SPD byte is 0 or 0xff.  Same clamp/never-lower policy as
 * update_dimm_Trc().  Returns -1 on SPD read error.
 */
1657 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1659 unsigned clocks, old_clocks;
1662 value = spd_read_byte(ctrl->channel0[i], 42);
1663 if (value < 0) return -1;
1664 if ((value == 0) || (value == 0xff)) {
1665 value = param->tRFC;
/* value<<1 converts to half-ns units to match param->divisor; round up. */
1667 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1668 if (clocks < DTL_TRFC_MIN) {
1669 clocks = DTL_TRFC_MIN;
1671 if (clocks > DTL_TRFC_MAX) {
1674 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1675 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
/* Keep the slower (larger) of old vs. new so all DIMMs are satisfied. */
1676 if (old_clocks > clocks) {
1677 clocks = old_clocks;
1679 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1680 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1681 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Trcd() - program the Trcd field of DRAM Timing Low from
 * SPD byte 29 (tRCD, in quarter-ns units, hence divisor<<1).  Same
 * clamp/never-lower policy as the other update_dimm_T* helpers.
 * Returns -1 on SPD read error.
 */
1686 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1688 unsigned clocks, old_clocks;
1691 value = spd_read_byte(ctrl->channel0[i], 29);
1692 if (value < 0) return -1;
1693 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1694 if (clocks < DTL_TRCD_MIN) {
1695 clocks = DTL_TRCD_MIN;
1697 if (clocks > DTL_TRCD_MAX) {
1700 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1701 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
/* Keep the slower (larger) of old vs. new so all DIMMs are satisfied. */
1702 if (old_clocks > clocks) {
1703 clocks = old_clocks;
1705 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1706 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1707 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Trrd() - program the Trrd field of DRAM Timing Low from
 * SPD byte 28 (tRRD, in quarter-ns units, hence divisor<<1).  Same
 * clamp/never-lower policy as the other update_dimm_T* helpers.
 * Returns -1 on SPD read error.
 */
1711 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1713 unsigned clocks, old_clocks;
1716 value = spd_read_byte(ctrl->channel0[i], 28);
1717 if (value < 0) return -1;
1718 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1719 if (clocks < DTL_TRRD_MIN) {
1720 clocks = DTL_TRRD_MIN;
1722 if (clocks > DTL_TRRD_MAX) {
1725 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1726 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
/* Keep the slower (larger) of old vs. new so all DIMMs are satisfied. */
1727 if (old_clocks > clocks) {
1728 clocks = old_clocks;
1730 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1731 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1732 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Tras() - program the Tras field of DRAM Timing Low from
 * SPD byte 30 (tRAS, in ns; <<1 converts to half-ns to match
 * param->divisor).  Same clamp/never-lower policy as the other
 * update_dimm_T* helpers.  Returns -1 on SPD read error.
 */
1736 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1738 unsigned clocks, old_clocks;
1741 value = spd_read_byte(ctrl->channel0[i], 30);
1742 if (value < 0) return -1;
1743 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1744 if (clocks < DTL_TRAS_MIN) {
1745 clocks = DTL_TRAS_MIN;
1747 if (clocks > DTL_TRAS_MAX) {
1750 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1751 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
/* Keep the slower (larger) of old vs. new so all DIMMs are satisfied. */
1752 if (old_clocks > clocks) {
1753 clocks = old_clocks;
1755 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1756 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1757 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Trp() - program the Trp field of DRAM Timing Low from
 * SPD byte 27 (tRP, in quarter-ns units, hence divisor<<1).  Same
 * clamp/never-lower policy as the other update_dimm_T* helpers.
 * Returns -1 on SPD read error.
 */
1761 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1763 unsigned clocks, old_clocks;
1766 value = spd_read_byte(ctrl->channel0[i], 27);
1767 if (value < 0) return -1;
1768 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1769 if (clocks < DTL_TRP_MIN) {
1770 clocks = DTL_TRP_MIN;
1772 if (clocks > DTL_TRP_MAX) {
1775 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1776 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
/* Keep the slower (larger) of old vs. new so all DIMMs are satisfied. */
1777 if (old_clocks > clocks) {
1778 clocks = old_clocks;
1780 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1781 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1782 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* set_Twr() - program the write-recovery time (Twr) field of DRAM
 * Timing Low directly from the selected mem_param (not per-DIMM SPD). */
1786 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1789 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1790 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1791 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1792 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* init_Tref() - seed the refresh-rate (Tref) field of DRAM Timing High
 * with the 4K-row value; update_dimm_Tref() later switches to the
 * 8K-row value if any DIMM requires it. */
1796 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1799 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1800 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1801 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1802 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* update_dimm_Tref() - choose 4K- or 8K-row refresh for DIMM i based on
 * SPD byte 3 (number of row address bits; 12 rows -> 4K refresh) and
 * merge it with what is already programmed: once any DIMM needs the 8K
 * rate, Tref stays at the 8K value.  Returns -1 on SPD read error.
 * NOTE(review): the branch selecting tref from `value` (around elided
 * lines 1812-1817) is partially hidden — confirm in full source.
 */
1805 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1809 unsigned tref, old_tref;
1810 value = spd_read_byte(ctrl->channel0[i], 3);
1811 if (value < 0) return -1;
1814 tref = param->dch_tref8k;
1816 tref = param->dch_tref4k;
1819 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1820 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
/* Only keep 4K refresh if this DIMM allows it AND the register still
 * holds the 4K value; otherwise fall back to the slower 8K rate. */
1821 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1822 tref = param->dch_tref4k;
1824 tref = param->dch_tref8k;
1826 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1827 dth |= (tref << DTH_TREF_SHIFT);
1828 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* update_dimm_x4() - flag DIMM i as x4-organized in DRAM Config Low
 * based on SPD byte 13 (primary SDRAM device width).  With quad-rank
 * support, SPD byte 5 (number of physical banks/ranks) additionally
 * sets the x4 bit for the upper chip-select pair (i+2).
 * NOTE(review): the width test and dcl merge lines are elided in this
 * listing — confirm against full source.
 */
1833 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1837 #if QRANK_DIMM_SUPPORT == 1
1841 value = spd_read_byte(ctrl->channel0[i], 13);
1846 #if QRANK_DIMM_SUPPORT == 1
1847 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
1853 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1854 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank DIMMs occupy two chip-select pairs; mark the second too. */
1856 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1859 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1864 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* update_dimm_ecc() - read SPD byte 11 (module configuration type) for
 * DIMM i and, when the module is not ECC-capable (test elided in this
 * listing), clear DCL_DimmEccEn so ECC stays off for the whole channel. */
1868 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1872 value = spd_read_byte(ctrl->channel0[i], 11);
1877 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1878 dcl &= ~DCL_DimmEccEn;
1879 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* count_dimms() - count populated DIMMs by scanning the even chip-select
 * base registers (DRAM_CSBASE + 0,8,16,24; enable test elided here). */
1884 static int count_dimms(const struct mem_controller *ctrl)
1889 for(index = 0; index < 8; index += 2) {
1891 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
/* set_Twtr() - program the write-to-read delay (Twtr) field of DRAM
 * Timing High directly from the selected mem_param. */
1899 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1903 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1904 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1905 dth |= ((param->dtl_twtr - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1906 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* set_Trwt() - program the read-to-write delay (Trwt) of DRAM Timing
 * High.  The value is looked up in param->dtl_trwt[lat][mtype], indexed
 * by a CAS-latency class (lat, derived from the programmed DTL_TCL —
 * derivation elided in this listing) and a module/interface type.
 * Dies on an unmapped latency or an out-of-range table value.
 */
1909 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
1917 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1918 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
/* Classify the memory interface for the table's second index. */
1920 if (is_opteron(ctrl)) {
1921 mtype = 0; /* dual channel */
1922 } else if (is_registered(ctrl)) {
1923 mtype = 1; /* registered 64bit interface */
1925 mtype = 2; /* unbuffered 64bit interface */
1939 die("Unknown LAT for Trwt");
1942 clocks = param->dtl_trwt[lat][mtype];
1943 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1944 die("Unknown Trwt\r\n");
1947 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1948 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1949 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1950 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* set_Twcl() - program the write CAS latency (Twcl, memory clocks after
 * CAS#) in DRAM Timing High; the clock count depends on whether the
 * DIMMs are registered (assignments elided in this listing). */
1954 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1956 /* Memory Clocks after CAS# */
1959 if (is_registered(ctrl)) {
1964 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1965 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1966 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1967 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* set_read_preamble() - program the read preamble field of DRAM Config
 * High.  Counts occupied channel-0 slots, maps (registered?, slot
 * count) to an index into param->rdpreamble[] (the index assignments
 * inside the if/else chain are elided in this listing), range-checks
 * the table value, and writes it.  Dies on an unmapped configuration.
 */
1971 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1974 unsigned rdpreamble;
1979 for(i = 0; i < 4; i++) {
1980 if (ctrl->channel0[i]) {
1985 /* map to index to param.rdpreamble array */
1986 if (is_registered(ctrl)) {
1988 } else if (slots < 3) {
1990 } else if (slots == 3) {
1992 } else if (slots == 4) {
1995 die("Unknown rdpreamble for this nr of slots");
1998 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1999 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
/* `i` is the table index chosen in the chain above, not the loop index. */
2000 rdpreamble = param->rdpreamble[i];
2002 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
2003 die("Unknown rdpreamble");
2006 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
2007 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* set_max_async_latency() - program the maximum asynchronous latency
 * field of DRAM Config High based on DIMM count and whether the DIMMs
 * are registered (the async_lat assignments are elided in this
 * listing).  Dies if more unbuffered DIMMs are present than supported. */
2010 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2016 dimms = count_dimms(ctrl);
2018 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2019 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
2021 if (is_registered(ctrl)) {
2033 die("Too many unbuffered dimms");
2035 else if (dimms == 3) {
2044 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2045 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* set_idle_cycle_limit() - hardcode the dynamic idle cycle counter to
 * 16 cycles and enable it in DRAM Config High, per AMD recommendation. */
2048 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2051 /* AMD says to Hardcode this */
2052 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2053 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2054 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2055 dch |= DCH_DYN_IDLE_CTR_EN;
2056 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* spd_set_dram_timing() - apply all per-DIMM SPD-derived timings, then
 * the per-controller timings, for the selected mem_param.  Any DIMM
 * whose update_dimm_* helper reports failure (<= 0) is disabled via
 * the dimm_err path.  Returns the surviving dimm_mask.
 */
2059 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2063 init_Tref(ctrl, param);
2064 for(i = 0; i < DIMM_SOCKETS; i++) {
2066 if (!(dimm_mask & (1 << i))) {
2069 /* DRAM Timing Low Register */
2070 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2071 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2072 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2073 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2074 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2075 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2077 /* DRAM Timing High Register */
2078 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2081 /* DRAM Config Low */
2082 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2083 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
/* dimm_err: a helper rejected this DIMM — drop it from the mask. */
2089 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2091 /* DRAM Timing Low Register */
2092 set_Twr(ctrl, param);
2094 /* DRAM Timing High Register */
2095 set_Twtr(ctrl, param);
2096 set_Trwt(ctrl, param);
2097 set_Twcl(ctrl, param);
2099 /* DRAM Config High */
2100 set_read_preamble(ctrl, param);
2101 set_max_async_latency(ctrl, param);
2102 set_idle_cycle_limit(ctrl, param);
/* sdram_set_spd_registers() - top-level per-controller SPD flow:
 * detect DIMMs, pair them into dual-channel, size them, handle
 * unbuffered modules, pick the memclk/CAS, and program all timings.
 * Bails out silently if the controller is absent or has no memory;
 * resets the machine on an unrecoverable SPD error (hw_spd_err path).
 */
2106 #if RAMINIT_SYSINFO==1
2107 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
2109 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2112 struct spd_set_memclk_result result;
2113 const struct mem_param *param;
2116 if (!controller_present(ctrl)) {
2117 // print_debug("No memory controller present\r\n");
2121 hw_enable_ecc(ctrl);
2122 activate_spd_rom(ctrl);
2123 dimm_mask = spd_detect_dimms(ctrl);
2124 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2125 print_debug("No memory for this cpu\r\n");
2128 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2131 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2134 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
2137 result = spd_set_memclk(ctrl, dimm_mask);
2138 param = result.param;
2139 dimm_mask = result.dimm_mask;
2142 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2148 /* Unrecoverable error reading SPD data */
2149 print_err("SPD error - reset\r\n");
/* hoist_memory() - remap the DRAM that falls under the hardware memory
 * hole on node i to above 4GB.  Shifts the DRAM base/limit windows of
 * all higher-numbered nodes up by the hoisted amount, extends node i's
 * limit, and programs the DRAM Hole Address Register (F1 0xf0) with
 * the hole start and the hole offset.  Returns the carried-over size
 * in 64K units (return elided in this listing) — TODO confirm.
 */
2154 #if HW_MEM_HOLE_SIZEK != 0
2155 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2158 uint32_t carry_over;
2160 uint32_t base, limit;
/* Amount of memory (in K) displaced from below 4GB by the hole. */
2165 carry_over = (4*1024*1024) - hole_startk;
/* Shift every node above node i upward by carry_over. */
2167 for(ii=controllers - 1;ii>i;ii--) {
2168 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2169 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2172 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
/* DRAM map registers must agree on every node; write them all. */
2173 for(j = 0; j < controllers; j++) {
2174 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2175 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
2178 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2179 for(j = 0; j < controllers; j++) {
2180 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2183 base = pci_read_config32(dev, 0x40 + (i << 3));
2184 basek = (base & 0xffff0000) >> 2;
2185 if(basek == hole_startk) {
2186 //don't need to set the memhole here: the hole offset would be 0 (overflow),
2187 //so change the base register instead; the new base becomes 4*1024*1024 K
2189 base |= (4*1024*1024)<<2;
2190 for(j = 0; j < controllers; j++) {
2191 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
2195 hoist = /* hole start address */
2196 ((hole_startk << 10) & 0xff000000) +
2197 /* hole address to memory controller address */
2198 (((basek + carry_over) >> 6) & 0x0000ff00) +
2201 pci_write_config32(dev, 0xf0, hoist);
/* set_hw_mem_hole() - place the hardware memory hole just below 4GB
 * (size HW_MEM_HOLE_SIZEK), optionally nudging the hole start off a
 * node's DRAM base boundary, then find the node whose DRAM window
 * contains the hole start, hoist its memory, and reprogram TOP_MEM.
 */
2207 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2210 uint32_t hole_startk;
2213 hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
2215 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
2216 /* We need to double check if hole_startk is valid.
2217 * If it is equal to the dram base address in K (base_k),
2218 * we need to decrease it.
2221 for(i=0; i<controllers; i++) {
2224 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
/* Skip nodes whose DRAM window is not read/write enabled. */
2225 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2228 base_k = (base & 0xffff0000) >> 2;
2229 if(base_k == hole_startk) {
2230 /* decrease memory hole startk to make sure it is
2231 * in the middle of the previous node
2233 hole_startk -= (base_k - basek_pri)>>1;
2234 break; /* only one hole */
2240 /* Find node number that needs the memory hole configured */
2241 for(i=0; i<controllers; i++) {
2242 uint32_t base, limit;
2243 unsigned base_k, limit_k;
2244 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2245 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2248 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2249 base_k = (base & 0xffff0000) >> 2;
2250 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2251 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2253 hoist_memory(controllers, ctrl, hole_startk, i);
2254 end_k = memory_end_k(ctrl, controllers);
2255 set_top_mem(end_k, hole_startk);
2256 break; /* only one hole */
/* Upper bound on DRAM-init polling iterations before declaring failure. */
2264 #define TIMEOUT_LOOPS 300000
/* sdram_enable() - final bring-up across all controllers: validate the
 * memory clocks, pulse reset on the DIMMs, kick off hardware DRAM
 * initialization per node, poll DCL_DramInit for completion (bounded by
 * TIMEOUT_LOOPS), wait for the hardware memory clear on >= C0 parts,
 * set up the hardware memory hole on >= E0 parts, and (without
 * cache-as-ram) clear the first memory region so it is usable.
 */
2265 #if RAMINIT_SYSINFO == 1
2266 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
2268 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2273 /* Error if I don't have memory */
2274 if (memory_end_k(ctrl, controllers) == 0) {
2275 die("No memory\r\n");
2278 /* Before enabling memory start the memory clocks */
2279 for(i = 0; i < controllers; i++) {
2281 if (!controller_present(ctrl + i))
2283 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2284 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2285 dch |= DCH_MEMCLK_VALID;
2286 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2289 /* Disable dram receivers */
2291 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2292 dcl |= DCL_DisInRcvrs;
2293 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2297 /* And if necessary toggle the reset on the dimms by hand */
2298 memreset(controllers, ctrl);
2300 /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
2302 for(i = 0; i < controllers; i++) {
2304 if (!controller_present(ctrl + i))
2306 /* Skip everything if I don't have any memory on this controller */
2307 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2308 if (!(dch & DCH_MEMCLK_VALID)) {
2312 /* Toggle DisDqsHys to get it working */
2313 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2314 if (dcl & DCL_DimmEccEn) {
2316 print_spew("ECC enabled\r\n");
2317 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
/* ChipKill requires the 128-bit (dual-channel) interface. */
2319 if (dcl & DCL_128BitEn) {
2320 mnc |= MNC_CHIPKILL_EN;
2322 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2324 dcl |= DCL_DisDqsHys;
2325 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2326 dcl &= ~DCL_DisDqsHys;
2327 dcl &= ~DCL_DLL_Disable;
/* Start hardware DRAM initialization on this node. */
2330 dcl |= DCL_DramInit;
2331 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2334 for(i = 0; i < controllers; i++) {
2336 if (!controller_present(ctrl + i))
2338 /* Skip everything if I don't have any memory on this controller */
2339 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2340 if (!(dch & DCH_MEMCLK_VALID)) {
2344 print_debug("Initializing memory: ");
/* Poll until the hardware clears DCL_DramInit or we time out. */
2348 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2350 if ((loops & 1023) == 0) {
2353 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2354 if (loops >= TIMEOUT_LOOPS) {
2355 print_debug(" failed\r\n");
2359 if (!is_cpu_pre_c0()) {
2360 /* Wait until it is safe to touch memory */
2361 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2362 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2364 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2365 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2368 print_debug(" done\r\n");
2371 #if HW_MEM_HOLE_SIZEK != 0
2372 // init hw mem hole here
2373 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2374 if(!is_cpu_pre_e0())
2375 set_hw_mem_hole(controllers, ctrl);
2378 //FIXME add enable node interleaving here -- yhlu
2380 1. check how many nodes we have , if not all has ram installed get out
2381 2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
2382 3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
2383 4. if all ready enable node_interleaving in f1 0x40..... of every node
2384 5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
2387 #if USE_DCACHE_RAM == 0
2388 /* Make certain the first 1M of memory is initialized */
2389 print_debug("Clearing initial memory region: ")
2391 /* Use write combine caching while we setup the first 1M */
2392 cache_lbmem(MTRR_TYPE_WRCOMB);
2394 /* clear memory 1meg */
2395 clear_memory((void *)0, CONFIG_LB_MEM_TOPK << 10);
2397 /* The first 1M is now setup, use it */
2398 cache_lbmem(MTRR_TYPE_WRBACK);
2400 print_debug(" done\r\n");
/* set_sysinfo_in_ram() - record cache-as-ram sysinfo state (body elided
 * in this listing — presumably stores `val` at a fixed RAM location;
 * TODO confirm against full source). */
2404 #if USE_DCACHE_RAM == 1
2405 static void set_sysinfo_in_ram(unsigned val)
2409 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
2413 struct mem_controller *ctrl;
2414 for(i=0;i<controllers; i++) {
2417 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2418 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2419 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2420 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2422 if(spd_addr == (void *)0) continue;
2424 for(j=0;j<DIMM_SOCKETS;j++) {
2425 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2426 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];