1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
6 #if K8_REV_F_SUPPORT == 1
10 #include <cpu/x86/mem.h>
11 #include <cpu/x86/cache.h>
12 #include <cpu/x86/mtrr.h>
16 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
17 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
20 #ifndef QRANK_DIMM_SUPPORT
21 #define QRANK_DIMM_SUPPORT 0
24 #if defined (__GNUC__)
25 static void hard_reset(void);
/*
 * setup_resource_map - apply a table of PCI config-register updates.
 *
 * register_values is a flat array of triples:
 *   [i+0] = PCI_ADDR-encoded bus/dev/fn plus register offset,
 *   [i+1] = AND mask (bits to preserve in the current value),
 *   [i+2] = OR value (bits to set).
 * max is the total number of array entries (a multiple of 3).
 *
 * NOTE(review): the original chunk was fragmented (missing declarations and
 * preprocessor branch markers) and spelled the CAR printk helper
 * "prink_debug"; fixed to printk_debug here.
 */
static void setup_resource_map(const unsigned int *register_values, int max)
{
	int i;
//	print_debug("setting up resource map....");
	for(i = 0; i < max; i += 3) {
		unsigned dev;
		unsigned where;
		unsigned long reg;
#if CONFIG_USE_PRINTK_IN_CAR
		printk_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
#else
		print_debug_hex32(register_values[i]);
		print_debug(" <-");
		print_debug_hex32(register_values[i+2]);
		print_debug("\r\n");
#endif
		/* Split the encoded address into device handle and register offset. */
		dev = register_values[i] & ~0xfff;
		where = register_values[i] & 0xfff;
		/* Read-modify-write: keep masked bits, OR in the new value. */
		reg = pci_read_config32(dev, where);
		reg &= register_values[i+1];
		reg |= register_values[i+2];
		pci_write_config32(dev, where, reg);
	}
//	print_debug("done.\r\n");
}
67 static int controller_present(const struct mem_controller *ctrl)
69 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
/*
 * sdram_set_registers - program the initial K8 DRAM controller register map
 * for one memory controller (node).  Walks the register_values[] table of
 * (PCI address, AND-mask, OR-value) triples and applies each entry to this
 * node's PCI functions, rebased from the node-0 addresses encoded in the
 * table.
 *
 * NOTE(review): this chunk is a sampled extract; many original source lines
 * (braces, declarations, #else/#endif markers) are missing, so only
 * comments have been added here.
 */
72 #if RAMINIT_SYSINFO==1
73 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
75 static void sdram_set_registers(const struct mem_controller *ctrl)
78 static const unsigned int register_values[] = {
80 /* Careful set limit registers before base registers which contain the enables */
81 /* DRAM Limit i Registers
90 * [ 2: 0] Destination Node ID
100 * [10: 8] Interleave select
101 * specifies the values of A[14:12] to use with interleave enable.
103 * [31:16] DRAM Limit Address i Bits 39-24
104 * This field defines the upper address bits of a 40 bit address
105 * that define the end of the DRAM region.
107 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
108 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
109 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
110 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
111 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
112 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
113 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
114 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
115 /* DRAM Base i Registers
124 * [ 0: 0] Read Enable
127 * [ 1: 1] Write Enable
128 * 0 = Writes Disabled
131 * [10: 8] Interleave Enable
132 * 000 = No interleave
133 * 001 = Interleave on A[12] (2 nodes)
135 * 011 = Interleave on A[12] and A[14] (4 nodes)
139 * 111 = Interleve on A[12] and A[13] and A[14] (8 nodes)
141 * [13:16] DRAM Base Address i Bits 39-24
142 * This field defines the upper address bits of a 40-bit address
143 * that define the start of the DRAM region.
145 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
146 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
147 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
148 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
149 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
150 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
151 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
152 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
154 /* DRAM CS Base Address i Registers
163 * [ 0: 0] Chip-Select Bank Enable
167 * [15: 9] Base Address (19-13)
168 * An optimization used when all DIMM are the same size...
170 * [31:21] Base Address (35-25)
171 * This field defines the top 11 addresses bit of a 40-bit
172 * address that define the memory address space. These
173 * bits decode 32-MByte blocks of memory.
175 PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
176 PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
177 PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
178 PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
179 PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
180 PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
181 PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
182 PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
183 /* DRAM CS Mask Address i Registers
192 * Select bits to exclude from comparison with the DRAM Base address register.
194 * [15: 9] Address Mask (19-13)
195 * Address to be excluded from the optimized case
197 * [29:21] Address Mask (33-25)
198 * The bits with an address mask of 1 are excluded from address comparison
202 PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
203 PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
204 PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
205 PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
206 PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
207 PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
208 PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
209 PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
210 /* DRAM Bank Address Mapping Register
212 * Specify the memory module size
217 * 000 = 32Mbyte (Rows = 12 & Col = 8)
218 * 001 = 64Mbyte (Rows = 12 & Col = 9)
219 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
220 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
221 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
222 * 101 = 1Gbyte (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
223 * 110 = 2Gbyte (Rows = 14 & Col = 12)
230 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
231 /* DRAM Timing Low Register
233 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
243 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
244 * 0000 = 7 bus clocks
245 * 0001 = 8 bus clocks
247 * 1110 = 21 bus clocks
248 * 1111 = 22 bus clocks
249 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
250 * 0000 = 9 bus clocks
251 * 0010 = 10 bus clocks
253 * 1110 = 23 bus clocks
254 * 1111 = 24 bus clocks
255 * [14:12] Trcd (Ras#-active to Case#-read/write Delay)
265 * [18:16] Trrd (Ras# to Ras# Delay)
275 * [23:20] Tras (Minmum Ras# Active Time)
276 * 0000 to 0100 = reserved
277 * 0101 = 5 bus clocks
279 * 1111 = 15 bus clocks
280 * [26:24] Trp (Row Precharge Time)
290 * [28:28] Twr (Write Recovery Time)
295 PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
296 /* DRAM Timing High Register
298 * [ 0: 0] Twtr (Write to Read Delay)
302 * [ 6: 4] Trwt (Read to Write Delay)
312 * [12: 8] Tref (Refresh Rate)
313 * 00000 = 100Mhz 4K rows
314 * 00001 = 133Mhz 4K rows
315 * 00010 = 166Mhz 4K rows
316 * 00011 = 200Mhz 4K rows
317 * 01000 = 100Mhz 8K/16K rows
318 * 01001 = 133Mhz 8K/16K rows
319 * 01010 = 166Mhz 8K/16K rows
320 * 01011 = 200Mhz 8K/16K rows
322 * [22:20] Twcl (Write CAS Latency)
323 * 000 = 1 Mem clock after CAS# (Unbuffered Dimms)
324 * 001 = 2 Mem clocks after CAS# (Registered Dimms)
327 PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
328 /* DRAM Config Low Register
330 * [ 0: 0] DLL Disable
339 * [ 3: 3] Disable DQS Hystersis (FIXME handle this one carefully)
340 * 0 = Enable DQS input filter
341 * 1 = Disable DQS input filtering
344 * 0 = Initialization done or not yet started.
345 * 1 = Initiate DRAM intialization sequence
346 * [ 9: 9] SO-Dimm Enable
348 * 1 = SO-Dimms present
350 * 0 = DRAM not enabled
351 * 1 = DRAM initialized and enabled
352 * [11:11] Memory Clear Status
353 * 0 = Memory Clear function has not completed
354 * 1 = Memory Clear function has completed
355 * [12:12] Exit Self-Refresh
356 * 0 = Exit from self-refresh done or not yet started
357 * 1 = DRAM exiting from self refresh
358 * [13:13] Self-Refresh Status
359 * 0 = Normal Operation
360 * 1 = Self-refresh mode active
361 * [15:14] Read/Write Queue Bypass Count
366 * [16:16] 128-bit/64-Bit
367 * 0 = 64bit Interface to DRAM
368 * 1 = 128bit Interface to DRAM
369 * [17:17] DIMM ECC Enable
370 * 0 = Some DIMMs do not have ECC
371 * 1 = ALL DIMMS have ECC bits
372 * [18:18] UnBuffered DIMMs
374 * 1 = Unbuffered DIMMS
375 * [19:19] Enable 32-Byte Granularity
376 * 0 = Optimize for 64byte bursts
377 * 1 = Optimize for 32byte bursts
378 * [20:20] DIMM 0 is x4
379 * [21:21] DIMM 1 is x4
380 * [22:22] DIMM 2 is x4
381 * [23:23] DIMM 3 is x4
383 * 1 = x4 DIMM present
384 * [24:24] Disable DRAM Receivers
385 * 0 = Receivers enabled
386 * 1 = Receivers disabled
388 * 000 = Arbiters chois is always respected
389 * 001 = Oldest entry in DCQ can be bypassed 1 time
390 * 010 = Oldest entry in DCQ can be bypassed 2 times
391 * 011 = Oldest entry in DCQ can be bypassed 3 times
392 * 100 = Oldest entry in DCQ can be bypassed 4 times
393 * 101 = Oldest entry in DCQ can be bypassed 5 times
394 * 110 = Oldest entry in DCQ can be bypassed 6 times
395 * 111 = Oldest entry in DCQ can be bypassed 7 times
398 PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
400 (0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
401 (1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
402 (2 << 14)|(0 << 13)|(0 << 12)|
403 (0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
404 (0 << 3) |(0 << 1) |(0 << 0),
405 /* DRAM Config High Register
407 * [ 0: 3] Maximum Asynchronous Latency
412 * [11: 8] Read Preamble
430 * [18:16] Idle Cycle Limit
439 * [19:19] Dynamic Idle Cycle Center Enable
440 * 0 = Use Idle Cycle Limit
441 * 1 = Generate a dynamic Idle cycle limit
442 * [22:20] DRAM MEMCLK Frequency
452 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
453 * 0 = Disable MemClks
455 * [26:26] Memory Clock 0 Enable
458 * [27:27] Memory Clock 1 Enable
461 * [28:28] Memory Clock 2 Enable
464 * [29:29] Memory Clock 3 Enable
469 PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
470 (0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
471 (0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
472 /* DRAM Delay Line Register
474 * Adjust the skew of the input DQS strobe relative to DATA
476 * [23:16] Delay Line Adjust
477 * Adjusts the DLL derived PDL delay by one or more delay stages
478 * in either the faster or slower direction.
479 * [24:24} Adjust Slower
481 * 1 = Adj is used to increase the PDL delay
482 * [25:25] Adjust Faster
484 * 1 = Adj is used to decrease the PDL delay
487 PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
488 /* MCA NB Status Low reg */
489 PCI_ADDR(0, 0x18, 3, 0x48), 0x00f00000, 0x00000000,
490 /* MCA NB Status high reg */
491 PCI_ADDR(0, 0x18, 3, 0x4c), 0x01801e8c, 0x00000000,
492 /* MCA NB address Low reg */
493 PCI_ADDR(0, 0x18, 3, 0x50), 0x00000007, 0x00000000,
494 /* MCA NB address high reg */
495 PCI_ADDR(0, 0x18, 3, 0x54), 0xffffff00, 0x00000000,
496 /* DRAM Scrub Control Register
498 * [ 4: 0] DRAM Scrube Rate
500 * [12: 8] L2 Scrub Rate
502 * [20:16] Dcache Scrub
505 * 00000 = Do not scrub
527 * All Others = Reserved
529 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
530 /* DRAM Scrub Address Low Register
532 * [ 0: 0] DRAM Scrubber Redirect Enable
534 * 1 = Scrubber Corrects errors found in normal operation
536 * [31: 6] DRAM Scrub Address 31-6
538 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
539 /* DRAM Scrub Address High Register
541 * [ 7: 0] DRAM Scrubb Address 39-32
544 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
/* Bail out early for an empty socket / absent node. */
550 if (!controller_present(ctrl)) {
551 // print_debug("No memory controller present\r\n");
555 print_spew("setting up CPU");
556 print_spew_hex8(ctrl->node_id);
557 print_spew(" northbridge registers\r\n");
558 max = sizeof(register_values)/sizeof(register_values[0]);
559 for(i = 0; i < max; i += 3) {
/* NOTE(review): "prink_debug" below is a typo for printk_debug — this
 * branch cannot compile as written; confirm against the full source. */
564 #if CONFIG_USE_PRINTK_IN_CAR
565 prink_debug("%08x <- %08x\r\n", register_values[i], register_values[i+2]);
567 print_spew_hex32(register_values[i]);
569 print_spew_hex32(register_values[i+2]);
/* Rebase the table's node-0 (dev 0x18) address onto this controller's
 * own PCI function before the read-modify-write. */
573 dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
574 where = register_values[i] & 0xfff;
575 reg = pci_read_config32(dev, where);
576 reg &= register_values[i+1];
577 reg |= register_values[i+2];
578 pci_write_config32(dev, where, reg);
/* NOTE(review): the lines below look like a dead alternate branch that
 * calls pci_read_config32 with one argument — presumably guarded by a
 * preprocessor conditional that was sampled out of this extract. */
581 reg = pci_read_config32(register_values[i]);
582 reg &= register_values[i+1];
583 reg |= register_values[i+2];
584 pci_write_config32(register_values[i], reg);
587 print_spew("done.\r\n");
/*
 * hw_enable_ecc - enable DIMM ECC in DRAM Config Low only when the
 * northbridge reports ECC capability, and honor the CMOS "ECC_memory"
 * option (default on) which can force it back off.
 */
591 static void hw_enable_ecc(const struct mem_controller *ctrl)
594 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
595 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
/* Start from ECC disabled, then set it only if capable and allowed. */
596 dcl &= ~DCL_DimmEccEn;
597 if (nbcap & NBCAP_ECC) {
598 dcl |= DCL_DimmEccEn;
600 if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
601 dcl &= ~DCL_DimmEccEn;
603 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
607 static int is_dual_channel(const struct mem_controller *ctrl)
610 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
611 return dcl & DCL_128BitEn;
/*
 * is_opteron - heuristic CPU-class test: returns nonzero when the
 * northbridge advertises 128-bit (dual channel) capability.  As the
 * original FIXME notes, this misclassifies Socket 939 Athlon64 parts,
 * which also have dual-channel capability.
 */
614 static int is_opteron(const struct mem_controller *ctrl)
616 /* Test to see if I am an Opteron.
617 * FIXME Socket 939 based Athlon64 have dual channel capability,
618 * too, so we need a better test for Opterons
620 #warning "FIXME: Implement a better test for Opterons"
622 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
623 return !!(nbcap & NBCAP_128Bit);
/*
 * is_registered - nonzero when the controller is configured for registered
 * (buffered) DIMMs, i.e. DCL_UnBufDimm is clear.  Only meaningful after
 * spd_handle_unbuffered_dimms has programmed that bit.
 */
626 static int is_registered(const struct mem_controller *ctrl)
628 /* Test to see if we are dealing with registered SDRAM.
629 * If we are not registered we are unbuffered.
630 * This function must be called after spd_handle_unbuffered_dimms.
633 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
634 return !(dcl & DCL_UnBufDimm);
642 #if QRANK_DIMM_SUPPORT == 1
/*
 * spd_get_dimm_size - read SPD bytes from one DIMM and compute the log2
 * size (in bits) of each side, plus row/column counts (QRANK builds).
 * Returns sides of 0 on SPD read failure; die()s on bad SPD values.
 * NOTE(review): sampled extract — goto labels, struct init and several
 * statements are missing from this view.
 */
647 static struct dimm_size spd_get_dimm_size(unsigned device)
649 /* Calculate the log base 2 size of a DIMM in bits */
656 #if QRANK_DIMM_SUPPORT == 1
660 /* Note it might be easier to use byte 31 here, it has the DIMM size as
661 * a multiple of 4MB. The way we do it now we can size both
662 * sides of an assymetric dimm.
664 value = spd_read_byte(device, 3); /* rows */
665 if (value < 0) goto hw_err;
666 if ((value & 0xf) == 0) goto val_err;
667 sz.side1 += value & 0xf;
668 sz.rows = value & 0xf;
670 value = spd_read_byte(device, 4); /* columns */
671 if (value < 0) goto hw_err;
672 if ((value & 0xf) == 0) goto val_err;
673 sz.side1 += value & 0xf;
674 sz.col = value & 0xf;
676 value = spd_read_byte(device, 17); /* banks */
677 if (value < 0) goto hw_err;
678 if ((value & 0xff) == 0) goto val_err;
679 sz.side1 += log2(value & 0xff);
681 /* Get the module data width and convert it to a power of two */
682 value = spd_read_byte(device, 7); /* (high byte) */
683 if (value < 0) goto hw_err;
687 low = spd_read_byte(device, 6); /* (low byte) */
688 if (low < 0) goto hw_err;
689 value = value | (low & 0xff);
/* Only 64-bit (non-ECC) and 72-bit (ECC) module widths are legal. */
690 if ((value != 72) && (value != 64)) goto val_err;
691 sz.side1 += log2(value);
694 value = spd_read_byte(device, 5); /* number of physical banks */
695 if (value < 0) goto hw_err;
696 if (value == 1) goto out;
697 if ((value != 2) && (value != 4 )) {
700 #if QRANK_DIMM_SUPPORT == 1
704 /* Start with the symmetrical case */
707 value = spd_read_byte(device, 3); /* rows */
708 if (value < 0) goto hw_err;
/* Upper nibble of byte 3 is side 2's row count; 0 means symmetric. */
709 if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
710 sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
711 sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */
713 value = spd_read_byte(device, 4); /* columns */
714 if (value < 0) goto hw_err;
715 if ((value & 0xff) == 0) goto val_err;
716 sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
717 sz.side2 += ((value >> 4) & 0x0f); /* Add in columsn on side 2 */
722 die("Bad SPD value\r\n");
723 /* If an hw_error occurs report that I have no memory */
729 #if QRANK_DIMM_SUPPORT == 1
/*
 * set_dimm_size - program the chip-select base registers for DIMM `index`
 * from its computed log2 size, and enable its memory clock(s).
 * sz.side1/side2 are log2 sizes in bits; csbase encodes 32MB blocks in
 * bits 31-21 with bit 0 as the enable.
 */
737 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
739 uint32_t base0, base1;
/* Asymmetric DIMM — presumably sets a per-DIMM x4/odd flag here (body
 * sampled out); confirm against full source. */
742 if (sz.side1 != sz.side2) {
746 /* For each base register.
747 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
748 * The initialize dimm size is in bits.
749 * Set the base enable bit0.
754 /* Make certain side1 of the dimm is at least 32MB */
755 if (sz.side1 >= (25 +3)) {
756 base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
759 /* Make certain side2 of the dimm is at least 32MB */
760 if (sz.side2 >= (25 + 3)) {
761 base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
764 /* Double the size if we are using dual channel memory */
765 if (is_dual_channel(ctrl)) {
766 base0 = (base0 << 1) | (base0 & 1);
767 base1 = (base1 << 1) | (base1 & 1);
770 /* Clear the reserved bits */
771 base0 &= ~0x001ffffe;
772 base1 &= ~0x001ffffe;
774 /* Set the appropriate DIMM base address register */
775 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
776 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
777 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank: mirror the bases into the upper chip-select pair. */
779 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
780 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
784 /* Enable the memory clocks for this DIMM */
786 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
787 dch |= DCH_MEMCLK_EN0 << index;
788 #if QRANK_DIMM_SUPPORT == 1
790 dch |= DCH_MEMCLK_EN0 << (index + 2);
793 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/*
 * set_dimm_map - program this DIMM's 4-bit field in the DRAM Bank Address
 * Mapping register.  Pre-D0 cores encode the raw log2-size delta; later
 * cores use the cs_map_aa[] (rows,cols)->mode lookup table.
 */
797 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
799 static const unsigned cs_map_aa[] = {
800 /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
809 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
/* Clear this DIMM's nibble (and its quad-rank mirror) before setting it. */
810 map &= ~(0xf << (index * 4));
811 #if QRANK_DIMM_SUPPORT == 1
813 map &= ~(0xf << ( (index + 2) * 4));
818 /* Make certain side1 of the dimm is at least 32MB */
819 if (sz.side1 >= (25 +3)) {
820 if(is_cpu_pre_d0()) {
821 map |= (sz.side1 - (25 + 3)) << (index *4);
822 #if QRANK_DIMM_SUPPORT == 1
824 map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
/* D0 and later: index cs_map_aa by (rows-12)*5 + (cols-8). */
829 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
830 #if QRANK_DIMM_SUPPORT == 1
832 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
838 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/*
 * spd_set_ram_size - size every populated DIMM in dimm_mask via SPD and
 * program its chip-select base and bank-address mapping.
 * Returns -1 on an SPD error; otherwise (presumably) the updated
 * dimm_mask — the return statement is outside this sampled view.
 */
842 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
846 for(i = 0; i < DIMM_SOCKETS; i++) {
/* Skip sockets not flagged as populated. */
848 if (!(dimm_mask & (1 << i))) {
851 sz = spd_get_dimm_size(ctrl->channel0[i]);
853 return -1; /* Report SPD error */
855 set_dimm_size(ctrl, sz, i);
856 set_dimm_map (ctrl, sz, i);
/*
 * route_dram_accesses - program the DRAM base/limit routing registers on
 * every node's function 1 so that [base_k, limit_k) (in KB) is routed to
 * this controller's node.  Addresses are written in the registers' 256KB
 * granularity (<< 2 from KB into bits [31:16]-style fields).
 */
861 static void route_dram_accesses(const struct mem_controller *ctrl,
862 unsigned long base_k, unsigned long limit_k)
864 /* Route the addresses to the controller node */
869 unsigned limit_reg, base_reg;
872 node_id = ctrl->node_id;
873 index = (node_id << 3);
874 limit = (limit_k << 2);
/* Destination node in bits [2:0]; no interleave (bits [10:8] = 0). */
877 limit |= ( 0 << 8) | (node_id << 0);
878 base = (base_k << 2);
/* Bits 1:0 are the write/read enables for this range. */
880 base |= (0 << 8) | (1<<1) | (1<<0);
882 limit_reg = 0x44 + index;
883 base_reg = 0x40 + index;
/* Broadcast the pair to function 1 of every HT node (0x18-0x1f). */
884 for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
885 pci_write_config32(device, limit_reg, limit);
886 pci_write_config32(device, base_reg, base);
/*
 * set_top_mem - program the TOP_MEM / TOP_MEM2 MSRs from the total memory
 * size in KB.  Memory above 4GB goes in TOP_MEM2; below 4GB a hole is kept
 * under the 4GB boundary for the ROM and MMIO.
 */
890 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
892 /* Error if I don't have memory */
897 /* Report the amount of memory. */
898 print_spew("RAM: 0x");
899 print_spew_hex32(tom_k)
900 print_spew(" KB\r\n");
902 /* Now set top of memory */
904 if(tom_k > (4*1024*1024)) {
/* Split the KB count into the MSR's lo/hi halves (byte address = KB<<10). */
905 msr.lo = (tom_k & 0x003fffff) << 10;
906 msr.hi = (tom_k & 0xffc00000) >> 22;
907 wrmsr(TOP_MEM2, msr);
910 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
911 * so I can see my rom chip and other I/O devices.
913 if (tom_k >= 0x003f0000) {
914 #if HW_MEM_HOLE_SIZEK != 0
/* With a hardware memory hole, TOP_MEM is clamped to the hole start. */
915 if(hole_startk != 0) {
921 msr.lo = (tom_k & 0x003fffff) << 10;
922 msr.hi = (tom_k & 0xffc00000) >> 22;
/*
 * interleave_chip_selects - if all enabled chip selects have the same size
 * and bank-address mode, interleave them on low address bits and return the
 * total memory in KB; (presumably returns 0 when interleaving is not
 * possible — the early-return lines are outside this sampled view).
 */
926 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
929 static const uint8_t csbase_low_shift[] = {
932 /* 128MB */ (14 - 4),
933 /* 256MB */ (15 - 4),
934 /* 512MB */ (15 - 4),
/* D0 and later cores use a different cs_mode -> shift table. */
939 static const uint8_t csbase_low_d0_shift[] = {
942 /* 128MB */ (14 - 4),
943 /* 128MB */ (15 - 4),
944 /* 256MB */ (15 - 4),
945 /* 512MB */ (15 - 4),
946 /* 256MB */ (16 - 4),
947 /* 512MB */ (16 - 4),
953 /* cs_base_high is not changed */
956 int chip_selects, index;
958 unsigned common_size;
959 unsigned common_cs_mode;
960 uint32_t csbase, csmask;
962 /* See if all of the memory chip selects are the same size
963 * and if so count them.
968 for(index = 0; index < 8; index++) {
973 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
/* First enabled CS establishes the common size; any mismatch aborts. */
981 if (common_size == 0) {
984 /* The size differed fail */
985 if (common_size != size) {
989 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
990 cs_mode =( value >> ((index>>1)*4)) & 0xf;
991 if(cs_mode == 0 ) continue;
992 if(common_cs_mode == 0) {
993 common_cs_mode = cs_mode;
995 /* The size differed fail */
996 if(common_cs_mode != cs_mode) {
1001 /* Chip selects can only be interleaved when there is
1002 * more than one and their is a power of two of them.
1004 bits = log2(chip_selects);
1005 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
1009 /* Find the bits of csbase that we need to interleave on */
1010 if(is_cpu_pre_d0()){
1011 csbase_inc = 1 << csbase_low_shift[common_cs_mode];
1012 if(is_dual_channel(ctrl)) {
1013 /* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
1014 if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
1015 // print_debug("8 4GB chip selects cannot be interleaved\r\n");
1022 csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
1023 if(is_dual_channel(ctrl)) {
1024 if( (bits==3) && (common_cs_mode > 8)) {
1025 // print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
1032 /* Compute the initial values for csbase and csbask.
1033 * In csbase just set the enable bit and the base to zero.
1034 * In csmask set the mask bits for the size and page level interleave.
1037 csmask = (((common_size << bits) - 1) << 21);
1038 csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
1039 for(index = 0; index < 8; index++) {
1042 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1043 /* Is it enabled? */
/* Each enabled CS gets the same mask and a stepped base. */
1047 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1048 pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
1049 csbase += csbase_inc;
1052 print_spew("Interleaved\r\n");
1054 /* Return the memory size in K */
1055 return common_size << (15 + bits);
/*
 * order_chip_selects - lay out enabled chip selects contiguously, largest
 * first, rewriting each CS base/mask pair.  Tracks used registers in the
 * top 8 bits of `tom` and returns total memory in KB.
 */
1058 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1062 /* Remember which registers we have used in the high 8 bits of tom */
1065 /* Find the largest remaining canidate */
1066 unsigned index, canidate;
1067 uint32_t csbase, csmask;
1071 for(index = 0; index < 8; index++) {
1073 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1075 /* Is it enabled? */
1080 /* Is it greater? */
1081 if (value <= csbase) {
1085 /* Has it already been selected */
1086 if (tom & (1 << (index + 24))) {
1089 /* I have a new canidate */
1093 /* See if I have found a new canidate */
1098 /* Remember the dimm size */
/* csbase bits 31:21 hold the size in 32MB blocks. */
1099 size = csbase >> 21;
1101 /* Remember I have used this register */
1102 tom |= (1 << (canidate + 24));
1104 /* Recompute the cs base register value */
1105 csbase = (tom << 21) | 1;
1107 /* Increment the top of memory */
1110 /* Compute the memory mask */
1111 csmask = ((size -1) << 21);
1112 csmask |= 0xfe00; /* For now don't optimize */
1114 /* Write the new base register */
1115 pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1116 /* Write the new mask register */
1117 pci_write_config32(ctrl->f2, DRAM_CSMASK + (canidate << 2), csmask);
1120 /* Return the memory size in K */
1121 return (tom & ~0xff000000) << 15;
/*
 * memory_end_k - scan the DRAM base/limit routing registers of nodes
 * [0, max_node_id) and return the highest mapped memory address in KB.
 */
1124 unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1128 /* Find the last memory address used */
1130 for(node_id = 0; node_id < max_node_id; node_id++) {
1131 uint32_t limit, base;
1133 index = node_id << 3;
1134 base = pci_read_config32(ctrl->f1, 0x40 + index);
1135 /* Only look at the limit if the base is enabled */
1136 if ((base & 3) == 3) {
1137 limit = pci_read_config32(ctrl->f1, 0x44 + index);
/* Limit field is inclusive: round up one 64MB unit, then convert to KB. */
1138 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
/*
 * order_dimms - finalize the memory map for one controller: interleave
 * chip selects when the CMOS option allows (default on), otherwise lay
 * them out in size order; then route the range to this node and set the
 * TOP_MEM MSRs.
 */
1144 static void order_dimms(const struct mem_controller *ctrl)
1146 unsigned long tom_k, base_k;
1148 if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1149 tom_k = interleave_chip_selects(ctrl);
1151 print_debug("Interleaving disabled\r\n");
1155 tom_k = order_chip_selects(ctrl);
1157 /* Compute the memory base address */
/* This node starts where the previously-mapped nodes end. */
1158 base_k = memory_end_k(ctrl, ctrl->node_id);
1160 route_dram_accesses(ctrl, base_k, tom_k);
1161 set_top_mem(tom_k, 0);
/*
 * disable_dimm - zero both chip-select base registers of DIMM `index`
 * (clearing their enable bits) and drop it from dimm_mask, which is
 * (presumably) returned — the return statement is outside this view.
 */
1164 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1166 print_debug("disabling dimm");
1167 print_debug_hex8(index);
1168 print_debug("\r\n");
1169 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1170 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1171 dimm_mask &= ~(1 << index);
/*
 * spd_handle_unbuffered_dimms - classify every populated DIMM as registered
 * or unbuffered via SPD byte 21 bit 1, refuse mixed populations, and
 * program DCL_UnBufDimm (plus DCL_UpperCSMap on socket-939 parts)
 * accordingly.
 */
1175 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, long dimm_mask)
1180 int has_dualch = is_opteron(ctrl);
1184 for(i = 0; (i < DIMM_SOCKETS); i++) {
1186 if (!(dimm_mask & (1 << i))) {
1189 value = spd_read_byte(ctrl->channel0[i], 21);
1193 /* Registered dimm ? */
1194 if (value & (1 << 1)) {
1197 /* Otherwise it must be an unbuffered dimm */
1202 if (unbuffered && registered) {
1203 die("Mixed buffered and registered dimms not supported");
1206 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1207 dcl &= ~DCL_UnBufDimm;
1209 if ((has_dualch) && (!is_cpu_pre_d0())) {
1210 dcl |= DCL_UnBufDimm; /* set DCL_DualDIMMen too? */
1212 /* set DCL_En2T if you have non-equal DDR mem types! */
/* cpuid model check — presumably identifies socket-939 parts. */
1214 if ((cpuid_eax(1) & 0x30) == 0x30) {
1215 /* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
1216 dcl |= DCL_UpperCSMap;
1219 dcl |= DCL_UnBufDimm;
1222 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1224 if (is_registered(ctrl)) {
1225 print_debug("Registered\r\n");
1227 print_debug("Unbuffered\r\n");
/*
 * spd_detect_dimms - probe SPD byte 2 (memory type) on every socket of
 * both channels; build and return a bitmask with channel-0 DIMMs in the
 * low DIMM_SOCKETS bits and channel-1 DIMMs in the next group.
 */
1233 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1238 for(i = 0; i < DIMM_SOCKETS; i++) {
1241 device = ctrl->channel0[i];
1243 byte = spd_read_byte(ctrl->channel0[i], 2); /* Type */
1245 dimm_mask |= (1 << i);
1248 device = ctrl->channel1[i];
1250 byte = spd_read_byte(ctrl->channel1[i], 2);
1252 dimm_mask |= (1 << (i + DIMM_SOCKETS));
/*
 * spd_enable_2channels - enable 128-bit (dual channel) mode when the DIMMs
 * are populated in identical pairs: both channels' DIMMs must match on the
 * listed SPD bytes and the northbridge must be 128-bit capable.  Falls back
 * to single channel otherwise (label is outside this sampled view).
 */
1259 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
1263 /* SPD addresses to verify are identical */
1264 static const uint8_t addresses[] = {
1265 2, /* Type should be DDR SDRAM */
1266 3, /* *Row addresses */
1267 4, /* *Column addresses */
1268 5, /* *Physical Banks */
1269 6, /* *Module Data Width low */
1270 7, /* *Module Data Width high */
1271 9, /* *Cycle time at highest CAS Latency CL=X */
1272 11, /* *SDRAM Type */
1273 13, /* *SDRAM Width */
1274 17, /* *Logical Banks */
1275 18, /* *Supported CAS Latencies */
1276 21, /* *SDRAM Module Attributes */
1277 23, /* *Cycle time at CAS Latnecy (CLX - 0.5) */
1278 26, /* *Cycle time at CAS Latnecy (CLX - 1.0) */
1279 27, /* *tRP Row precharge time */
1280 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1281 29, /* *tRCD RAS to CAS */
1282 30, /* *tRAS Activate to Precharge */
1283 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1284 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1286 /* If the dimms are not in pairs do not do dual channels */
1287 if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1288 ((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1289 goto single_channel;
1291 /* If the cpu is not capable of doing dual channels don't do dual channels */
1292 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1293 if (!(nbcap & NBCAP_128Bit)) {
1294 goto single_channel;
1296 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1297 unsigned device0, device1;
1300 /* If I don't have a dimm skip this one */
1301 if (!(dimm_mask & (1 << i))) {
1304 device0 = ctrl->channel0[i];
1305 device1 = ctrl->channel1[i];
/* Compare each critical SPD byte between the paired DIMMs. */
1306 for(j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
1308 addr = addresses[j];
1309 value0 = spd_read_byte(device0, addr);
1313 value1 = spd_read_byte(device1, addr);
1317 if (value0 != value1) {
1318 goto single_channel;
1322 print_spew("Enabling dual channel memory\r\n");
1324 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1325 dcl &= ~DCL_32ByteEn;
1326 dcl |= DCL_128BitEn;
1327 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Single channel path: drop channel 1's bits from the mask. */
1330 dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
/* Fields of struct mem_param (opening line is outside this sampled view):
 * per-memclk timing parameters selected by get_mem_param(). */
1336 uint8_t divisor; /* In 1/2 ns increments */
1339 uint32_t dch_memclk;
1340 uint16_t dch_tref4k, dch_tref8k;
1343 uint8_t dtl_trwt[3][3]; /* first index is CAS_LAT 2/2.5/3 and 128/registered64/64 */
1344 uint8_t rdpreamble[4]; /* 0 is for registered, 1 for 1-2 DIMMS, 2 and 3 for 3 or 4 unreg dimm slots */
/*
 * get_mem_param - pick the fastest mem_param entry whose cycle time still
 * satisfies min_cycle_time (SPD units: upper nibble ns, lower nibble
 * tenths).  die()s if min_cycle_time is faster than the fastest entry.
 */
1348 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1350 static const struct mem_param speed[] = {
1352 .name = "100Mhz\r\n",
1354 .divisor = (10 <<1),
1357 .dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1358 .dch_tref4k = DTH_TREF_100MHZ_4K,
1359 .dch_tref8k = DTH_TREF_100MHZ_8K,
1362 .dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1363 .rdpreamble = { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
1366 .name = "133Mhz\r\n",
1368 .divisor = (7<<1)+1,
1371 .dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1372 .dch_tref4k = DTH_TREF_133MHZ_4K,
1373 .dch_tref8k = DTH_TREF_133MHZ_8K,
1376 .dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1377 .rdpreamble = { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
1380 .name = "166Mhz\r\n",
1385 .dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1386 .dch_tref4k = DTH_TREF_166MHZ_4K,
1387 .dch_tref8k = DTH_TREF_166MHZ_8K,
1390 .dtl_trwt = { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
1391 .rdpreamble = { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
1394 .name = "200Mhz\r\n",
1399 .dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1400 .dch_tref4k = DTH_TREF_200MHZ_4K,
1401 .dch_tref8k = DTH_TREF_200MHZ_8K,
1404 .dtl_trwt = { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1405 .rdpreamble = { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
1411 const struct mem_param *param;
/* Walk from fastest to slowest; stop before an entry slower than needed.
 * The table presumably ends with a zero cycle_time sentinel. */
1412 for(param = &speed[0]; param->cycle_time ; param++) {
1413 if (min_cycle_time > (param+1)->cycle_time) {
1417 if (!param->cycle_time) {
1418 die("min_cycle_time to low");
1420 print_spew(param->name);
1421 #ifdef DRAM_MIN_CYCLE_TIME
1422 print_debug(param->name);
/* Result pair returned by spd_set_memclk(): the chosen timing parameters
 * (closing brace and remaining fields are outside this sampled view). */
1427 struct spd_set_memclk_result {
1428 const struct mem_param *param;
/* spd_set_memclk() - pick the memory clock (MEMCLK) and CAS latency.
 * Pass 1: find the fastest cycle time / lowest CAS latency supported by both
 * the northbridge (NORTHBRIDGE_CAP, optionally capped by the CMOS
 * max_mem_clock option) and every populated DIMM's SPD.  Pass 2: disable any
 * DIMM that cannot run the chosen operating point.  Finally program DRAM
 * Config High (MEMCLK) and DRAM Timing Low (CAS latency) and return the
 * matching mem_param entry plus the updated dimm_mask; on an unrecoverable
 * SPD read error, param is NULL and dimm_mask is -1.
 * NOTE(review): this listing is elided - lines are missing between the
 * numbered statements; comments describe only the visible code.
 */
1431 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1433 /* Compute the minimum cycle time for these dimms */
1434 struct spd_set_memclk_result result;
1435 unsigned min_cycle_time, min_latency, bios_cycle_time;
/* SPD indices of "cycle time at CL X-1 / X-0.5 / X" for the latency walk below */
1439 static const uint8_t latency_indicies[] = { 26, 23, 9 };
/* Cycle times use the SPD encoding: high nibble = ns, low nibble = tenths
 * (0x75 == 7.5ns).  Indexed by the NB capability MEMCLK field. */
1440 static const unsigned char min_cycle_times[] = {
1441 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1442 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1443 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1444 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
/* Start from what the memory controller itself can do... */
1447 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1449 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
/* ...then allow the user's CMOS option to slow it down further. */
1450 bios_cycle_time = min_cycle_times[
1451 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1452 if (bios_cycle_time > min_cycle_time) {
1453 min_cycle_time = bios_cycle_time;
1457 /* Compute the least latency with the fastest clock supported
1458 * by both the memory controller and the dimms.
1460 for(i = 0; i < DIMM_SOCKETS; i++) {
1461 int new_cycle_time, new_latency;
/* Skip sockets that are not populated / already disabled. */
1466 if (!(dimm_mask & (1 << i))) {
1470 /* First find the supported CAS latencies
1471 * Byte 18 for DDR SDRAM is interpreted:
1472 * bit 0 == CAS Latency = 1.0
1473 * bit 1 == CAS Latency = 1.5
1474 * bit 2 == CAS Latency = 2.0
1475 * bit 3 == CAS Latency = 2.5
1476 * bit 4 == CAS Latency = 3.0
1477 * bit 5 == CAS Latency = 3.5
/* Start pessimistic: 10ns (slowest supported cycle time). */
1481 new_cycle_time = 0xa0;
1484 latencies = spd_read_byte(ctrl->channel0[i], 18);
1485 if (latencies <= 0) continue;
1487 /* Compute the lowest cas latency supported */
/* NOTE(review): log2() here yields the highest set bit of the SPD byte;
 * the -2 offset pairs with latency_indicies[] above - confirm against the
 * elided log2() helper. */
1488 latency = log2(latencies) -2;
1490 /* Loop through and find a fast clock with a low latency */
1491 for(index = 0; index < 3; index++, latency++) {
/* Only CL 2, 2.5 and 3 (bits 2..4) are programmable on K8. */
1493 if ((latency < 2) || (latency > 4) ||
1494 (!(latencies & (1 << latency)))) {
1497 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1502 /* Only increase the latency if we decrease the clock */
1503 if ((value >= min_cycle_time) && (value < new_cycle_time)) {
1504 new_cycle_time = value;
1505 new_latency = latency;
1508 if (new_latency > 4){
1511 /* Does min_cycle_time need to be increased? */
1512 if (new_cycle_time > min_cycle_time) {
1513 min_cycle_time = new_cycle_time;
1515 /* Does min_latency need to be increased? */
1516 if (new_latency > min_latency) {
1517 min_latency = new_latency;
1520 /* Make a second pass through the dimms and disable
1521 * any that cannot support the selected memclk and cas latency.
1524 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1529 if (!(dimm_mask & (1 << i))) {
1532 latencies = spd_read_byte(ctrl->channel0[i], 18);
1533 if (latencies < 0) goto hw_error;
1534 if (latencies == 0) {
1538 /* Compute the lowest cas latency supported */
1539 latency = log2(latencies) -2;
1541 /* Walk through searching for the selected latency */
1542 for(index = 0; index < 3; index++, latency++) {
1543 if (!(latencies & (1 << latency))) {
1546 if (latency == min_latency)
1549 /* If I can't find the latency or my index is bad error */
1550 if ((latency != min_latency) || (index >= 3)) {
1554 /* Read the min_cycle_time for this latency */
1555 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1556 if (value < 0) goto hw_error;
1558 /* All is good if the selected clock speed
1559 * is what I need or slower.
1561 if (value <= min_cycle_time) {
1564 /* Otherwise I have an error, disable the dimm */
1566 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1569 //down speed for full load 4 rank support
1570 #if QRANK_DIMM_SUPPORT
/* All four sockets across both channels populated: check physical bank
 * counts (SPD byte 5) and cap the bus speed under full load. */
1571 if(dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1573 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1575 if (!(dimm_mask & (1 << i))) {
1578 val = spd_read_byte(ctrl->channel0[i], 5);
/* Cap DDR400 (5.0ns) down to DDR333 (6.0ns) for the fully loaded case. */
1585 if(min_cycle_time <= 0x50 ) {
1586 min_cycle_time = 0x60;
1593 /* Now that I know the minimum cycle time lookup the memory parameters */
1594 result.param = get_mem_param(min_cycle_time);
1596 /* Update DRAM Config High with our selected memory speed */
1597 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1598 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1600 /* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
1601 if(!is_cpu_pre_e0()) {
1602 if(min_cycle_time==0x50) {
1608 value |= result.param->dch_memclk;
1609 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
/* Map CL 2 / 2.5 / 3 to the DTL_TCL field encoding. */
1611 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1612 /* Update DRAM Timing Low with our selected cas latency */
1613 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1614 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1615 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1616 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1618 result.dimm_mask = dimm_mask;
/* hw_error path: signal the caller that SPD reads failed. */
1621 result.param = (const struct mem_param *)0;
1622 result.dimm_mask = -1;
/* Raise the Trc (row cycle time) field of DRAM Timing Low to cover DIMM i.
 * Reads SPD byte 41 (presumably tRC in ns per JEDEC - the fallback for
 * 0/0xff is elided from this listing).  Never lowers a value already
 * programmed by an earlier DIMM.  Returns -1 on SPD read error, and
 * (in elided code) 0 when the DIMM's requirement exceeds DTL_TRC_MAX.
 */
1627 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1629 unsigned clocks, old_clocks;
1632 value = spd_read_byte(ctrl->channel0[i], 41);
1633 if (value < 0) return -1;
1634 if ((value == 0) || (value == 0xff)) {
/* ceil((2 * value) / divisor): divisor is in half-ns units of the MEMCLK. */
1637 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1638 if (clocks < DTL_TRC_MIN) {
1639 clocks = DTL_TRC_MIN;
1641 if (clocks > DTL_TRC_MAX) {
/* Read-modify-write: keep the larger of the old and new clock counts. */
1645 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1646 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1647 if (old_clocks > clocks) {
1648 clocks = old_clocks;
1650 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1651 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1652 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Raise the Trfc (refresh-to-active) field of DRAM Timing Low for DIMM i.
 * SPD byte 42 supplies the time in ns; 0 or 0xff means "not specified" and
 * falls back to the table default param->tRFC.  Never lowers a value already
 * programmed by an earlier DIMM.  Returns -1 on SPD read error.
 */
1656 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1658 unsigned clocks, old_clocks;
1661 value = spd_read_byte(ctrl->channel0[i], 42);
1662 if (value < 0) return -1;
1663 if ((value == 0) || (value == 0xff)) {
1664 value = param->tRFC;
/* ceil((2 * value) / divisor): divisor is in half-ns units of the MEMCLK. */
1666 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1667 if (clocks < DTL_TRFC_MIN) {
1668 clocks = DTL_TRFC_MIN;
1670 if (clocks > DTL_TRFC_MAX) {
/* Keep the larger of the previously programmed and the new count. */
1673 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1674 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
1675 if (old_clocks > clocks) {
1676 clocks = old_clocks;
1678 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1679 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1680 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Raise the Trcd (RAS-to-CAS delay) field of DRAM Timing Low for DIMM i.
 * SPD byte 29 gives the delay in quarter-ns units, hence the divisor << 1
 * (divisor itself is in half-ns units).  Never lowers an already-programmed
 * value.  Returns -1 on SPD read error.
 */
1685 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1687 unsigned clocks, old_clocks;
1690 value = spd_read_byte(ctrl->channel0[i], 29);
1691 if (value < 0) return -1;
/* ceil(value / (2 * divisor)): quarter-ns SPD units vs half-ns divisor. */
1692 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1693 if (clocks < DTL_TRCD_MIN) {
1694 clocks = DTL_TRCD_MIN;
1696 if (clocks > DTL_TRCD_MAX) {
/* Keep the larger of the previously programmed and the new count. */
1699 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1700 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
1701 if (old_clocks > clocks) {
1702 clocks = old_clocks;
1704 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1705 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1706 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Raise the Trrd (active-to-active, different banks) field of DRAM Timing
 * Low for DIMM i.  SPD byte 28 gives the delay in quarter-ns units.
 * Never lowers an already-programmed value.  Returns -1 on SPD read error.
 */
1710 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1712 unsigned clocks, old_clocks;
1715 value = spd_read_byte(ctrl->channel0[i], 28);
1716 if (value < 0) return -1;
/* ceil(value / (2 * divisor)): quarter-ns SPD units vs half-ns divisor. */
1717 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1718 if (clocks < DTL_TRRD_MIN) {
1719 clocks = DTL_TRRD_MIN;
1721 if (clocks > DTL_TRRD_MAX) {
/* Keep the larger of the previously programmed and the new count. */
1724 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1725 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
1726 if (old_clocks > clocks) {
1727 clocks = old_clocks;
1729 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1730 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1731 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Raise the Tras (minimum active-to-precharge) field of DRAM Timing Low for
 * DIMM i.  SPD byte 30 gives the time in whole ns.  Never lowers an
 * already-programmed value.  Returns -1 on SPD read error.
 */
1735 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1737 unsigned clocks, old_clocks;
1740 value = spd_read_byte(ctrl->channel0[i], 30);
1741 if (value < 0) return -1;
/* ceil((2 * value) / divisor): divisor is in half-ns units of the MEMCLK. */
1742 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1743 if (clocks < DTL_TRAS_MIN) {
1744 clocks = DTL_TRAS_MIN;
1746 if (clocks > DTL_TRAS_MAX) {
/* Keep the larger of the previously programmed and the new count. */
1749 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1750 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1751 if (old_clocks > clocks) {
1752 clocks = old_clocks;
1754 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1755 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1756 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Raise the Trp (row precharge time) field of DRAM Timing Low for DIMM i.
 * SPD byte 27 gives the time in quarter-ns units.  Never lowers an
 * already-programmed value.  Returns -1 on SPD read error.
 */
1760 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1762 unsigned clocks, old_clocks;
1765 value = spd_read_byte(ctrl->channel0[i], 27);
1766 if (value < 0) return -1;
/* ceil(value / (2 * divisor)): quarter-ns SPD units vs half-ns divisor. */
1767 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1768 if (clocks < DTL_TRP_MIN) {
1769 clocks = DTL_TRP_MIN;
1771 if (clocks > DTL_TRP_MAX) {
/* Keep the larger of the previously programmed and the new count. */
1774 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1775 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
1776 if (old_clocks > clocks) {
1777 clocks = old_clocks;
1779 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1780 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1781 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1785 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1788 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1789 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1790 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1791 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
1795 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1798 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1799 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1800 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1801 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Choose the refresh rate (4K vs 8K rows) for DIMM i and program the Tref
 * field of DRAM Timing High.  SPD byte 3 holds the number of row address
 * bits; the branch selecting tref8k vs tref4k from it is elided from this
 * listing (presumably value == 12 -> 4K rows - TODO confirm).  Once any DIMM
 * has forced the 4K (faster) refresh rate, it is kept for all later DIMMs.
 * Returns -1 on SPD read error.
 */
1804 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1808 unsigned tref, old_tref;
1809 value = spd_read_byte(ctrl->channel0[i], 3);
1810 if (value < 0) return -1;
1813 tref = param->dch_tref8k;
1815 tref = param->dch_tref4k;
1818 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1819 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
/* If an earlier DIMM already selected the 4K rate, don't relax to 8K. */
1820 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1821 tref = param->dch_tref4k;
1823 tref = param->dch_tref8k;
1825 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1826 dth |= (tref << DTH_TREF_SHIFT);
1827 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Flag DIMM i as x4-organized in DRAM Config Low when its SPD says so.
 * SPD byte 13 is the primary SDRAM device width (the ==4 test is elided
 * from this listing).  With QRANK_DIMM_SUPPORT, SPD byte 5 (physical bank
 * count) additionally marks the upper chip-select pair for 4-rank DIMMs.
 */
1832 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1836 #if QRANK_DIMM_SUPPORT == 1
1840 value = spd_read_byte(ctrl->channel0[i], 13);
1845 #if QRANK_DIMM_SUPPORT == 1
1846 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
/* One x4 flag bit per DIMM socket, starting at DCL_x4DIMM_SHIFT. */
1852 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1853 #if QRANK_DIMM_SUPPORT == 1
/* 4-rank DIMMs occupy the neighbouring chip-select pair as well. */
1855 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1858 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1863 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Disable controller-wide ECC if DIMM i cannot supply it.
 * SPD byte 11 is the DIMM's error-check type (the comparison deciding
 * whether to clear the enable is elided from this listing); a single
 * non-ECC DIMM clears DCL_DimmEccEn for the whole controller.
 */
1867 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1871 value = spd_read_byte(ctrl->channel0[i], 11);
1876 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1877 dcl &= ~DCL_DimmEccEn;
1878 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count the populated DIMMs on this controller by scanning the even
 * chip-select base registers (one CS pair per DIMM, hence index += 2).
 * The enable-bit test and counter increment are elided from this listing.
 */
1883 static int count_dimms(const struct mem_controller *ctrl)
1888 for(index = 0; index < 8; index += 2) {
1890 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
1898 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1902 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1903 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1904 dth |= ((param->dtl_twtr - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1905 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program Trwt (read-to-write turnaround) for this controller.
 * Looks up the clock count in param->dtl_trwt[lat][mtype], where mtype
 * depends on the bus type (dual channel / registered / unbuffered) and lat
 * is derived from the programmed CAS latency field; the switch mapping the
 * DTL_TCL field to `lat` is elided from this listing.  Dies on values the
 * table cannot represent.
 */
1908 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
/* Current CAS latency as already programmed into DRAM Timing Low. */
1916 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1917 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1919 if (is_opteron(ctrl)) {
1920 mtype = 0; /* dual channel */
1921 } else if (is_registered(ctrl)) {
1922 mtype = 1; /* registered 64bit interface */
1924 mtype = 2; /* unbuffered 64bit interface */
1938 die("Unknown LAT for Trwt");
1941 clocks = param->dtl_trwt[lat][mtype];
1942 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1943 die("Unknown Trwt\r\n");
1946 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1947 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1948 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1949 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program Twcl (write CAS latency, memory clocks after CAS#) for this
 * controller.  The clock count depends on whether the DIMMs are registered;
 * the actual assignments to `clocks` are elided from this listing.
 */
1953 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1955 /* Memory Clocks after CAS# */
1958 if (is_registered(ctrl)) {
1963 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1964 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1965 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1966 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the read preamble in DRAM Config High based on the bus loading.
 * Counts populated channel0 slots, maps (registered, slot count) to an
 * index into param->rdpreamble[] (the index assignments inside the branches
 * are elided from this listing - `i` is reused as that index), range-checks
 * the table value, and writes the DCH_RDPREAMBLE field.
 */
1970 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1973 unsigned rdpreamble;
/* Count how many of the 4 channel0 SPD addresses are wired up. */
1978 for(i = 0; i < 4; i++) {
1979 if (ctrl->channel0[i]) {
1984 /* map to index to param.rdpreamble array */
1985 if (is_registered(ctrl)) {
1987 } else if (slots < 3) {
1989 } else if (slots == 3) {
1991 } else if (slots == 4) {
1994 die("Unknown rdpreamble for this nr of slots");
1997 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1998 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
/* NOTE: `i` here is the table index set by the (elided) branch bodies above. */
1999 rdpreamble = param->rdpreamble[i];
2001 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
2002 die("Unknown rdpreamble");
2005 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
2006 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the maximum asynchronous latency field of DRAM Config High.
 * The async_lat value is chosen from the DIMM count and whether the DIMMs
 * are registered; the assignments inside the branches are elided from this
 * listing.  More than the supported number of unbuffered DIMMs is fatal.
 */
2009 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2015 dimms = count_dimms(ctrl);
2017 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2018 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
2020 if (is_registered(ctrl)) {
2032 die("Too many unbuffered dimms");
2034 else if (dimms == 3) {
2043 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2044 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2047 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2050 /* AMD says to Hardcode this */
2051 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2052 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2053 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2054 dch |= DCH_DYN_IDLE_CTR_EN;
2055 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Apply all per-DIMM and global timing settings for the chosen mem_param.
 * For every enabled DIMM the update_dimm_* helpers tighten the timing
 * registers; a helper returning <= 0 sends control to the (elided) dimm_err
 * label, which disables that DIMM on rc == 0 and aborts on rc < 0.
 * Afterwards the table-driven global timings are written.  Returns the
 * possibly-reduced dimm_mask.
 */
2058 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2062 init_Tref(ctrl, param);
2063 for(i = 0; i < DIMM_SOCKETS; i++) {
2065 if (!(dimm_mask & (1 << i))) {
2068 /* DRAM Timing Low Register */
2069 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2070 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2071 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2072 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2073 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2074 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2076 /* DRAM Timing High Register */
2077 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2080 /* DRAM Config Low */
2081 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2082 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
/* dimm_err path: drop the offending DIMM and keep going. */
2088 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2090 /* DRAM Timing Low Register */
2091 set_Twr(ctrl, param);
2093 /* DRAM Timing High Register */
2094 set_Twtr(ctrl, param);
2095 set_Trwt(ctrl, param);
2096 set_Twcl(ctrl, param);
2098 /* DRAM Config High */
2099 set_read_preamble(ctrl, param);
2100 set_max_async_latency(ctrl, param);
2101 set_idle_cycle_limit(ctrl, param);
/* Top-level per-controller SPD programming sequence.
 * Detects DIMMs, enables dual-channel mode and sizes memory, handles
 * unbuffered DIMMs, picks MEMCLK/CAS, and applies all timing registers.
 * Each spd_* step can shrink dimm_mask; the error checks between the
 * visible calls are elided from this listing.  On an unrecoverable SPD
 * error the (elided) hw_spd_err path prints and forces a reset.
 */
2105 #if RAMINIT_SYSINFO==1
2106 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
2108 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2111 struct spd_set_memclk_result result;
2112 const struct mem_param *param;
2115 if (!controller_present(ctrl)) {
2116 // print_debug("No memory controller present\r\n");
2120 hw_enable_ecc(ctrl);
2121 activate_spd_rom(ctrl);
2122 dimm_mask = spd_detect_dimms(ctrl);
2123 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2124 print_debug("No memory for this cpu\r\n");
2127 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2130 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2133 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
2136 result = spd_set_memclk(ctrl, dimm_mask);
2137 param = result.param;
2138 dimm_mask = result.dimm_mask;
2141 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2147 /* Unrecoverable error reading SPD data */
2148 print_err("SPD error - reset\r\n");
2153 #if HW_MEM_HOLE_SIZEK != 0
/* Create the hardware memory hole on node i by "hoisting" the DRAM that
 * would collide with the MMIO hole above 4GB.  All addresses in the DRAM
 * base/limit registers are in units of 256 bytes (hence the << 2 on
 * kilobyte quantities).  carry_over is the amount of memory (in K) being
 * relocated above 4GB.  Returns (in elided code) the carried-over size.
 */
2154 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2157 uint32_t carry_over;
2159 uint32_t base, limit;
2164 carry_over = (4*1024*1024) - hole_startk;
/* Shift every node above node i upward by carry_over, highest node first
 * so ranges never overlap while being rewritten. */
2166 for(ii=controllers - 1;ii>i;ii--) {
2167 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2168 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2171 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
/* The DRAM base/limit map must be identical on every node's F1. */
2172 for(j = 0; j < controllers; j++) {
2173 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2174 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
/* Extend node i's limit to cover its hoisted portion. */
2177 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2178 for(j = 0; j < controllers; j++) {
2179 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2182 base = pci_read_config32(dev, 0x40 + (i << 3));
2183 basek = (base & 0xffff0000) >> 2;
2184 if(basek == hole_startk) {
2185 //don't need set memhole here, because hole off set will be 0, overflow
2186 //so need to change base reg instead, new basek will be 4*1024*1024
2188 base |= (4*1024*1024)<<2;
2189 for(j = 0; j < controllers; j++) {
2190 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
/* Otherwise program the DRAM Hole Address Register (F1:0xF0). */
2194 hoist = /* hole start address */
2195 ((hole_startk << 10) & 0xff000000) +
2196 /* hole address to memory controller address */
2197 (((basek + carry_over) >> 6) & 0x0000ff00) +
2200 pci_write_config32(dev, 0xf0, hoist);
/* Configure the hardware memory hole below 4GB (HW_MEM_HOLE_SIZEK wide).
 * Finds the node whose DRAM range spans hole_startk, hoists that node's
 * overlapping memory above 4GB, and reprograms TOP_MEM accordingly.
 */
2206 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2209 uint32_t hole_startk;
2212 hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
2214 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
2215 /* We need to double check if hole_startk is valid.
2216 * If it is equal to the dram base address in K (base_k),
2217 * we need to decrease it.
2220 for(i=0; i<controllers; i++) {
2223 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
/* Skip nodes whose DRAM base lacks both the read and write enable bits. */
2224 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2227 base_k = (base & 0xffff0000) >> 2;
2228 if(base_k == hole_startk) {
2229 /* decrease memory hole startk to make sure it is
2230 * in the middle of the previous node
2232 hole_startk -= (base_k - basek_pri)>>1;
2233 break; /* only one hole */
2239 /* Find node number that needs the memory hole configured */
2240 for(i=0; i<controllers; i++) {
2241 uint32_t base, limit;
2242 unsigned base_k, limit_k;
2243 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2244 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2247 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
/* Base/limit registers hold address bits 39:24 in [31:16], units of 256B. */
2248 base_k = (base & 0xffff0000) >> 2;
2249 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2250 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2252 hoist_memory(controllers, ctrl, hole_startk, i);
2253 end_k = memory_end_k(ctrl, controllers);
2254 set_top_mem(end_k, hole_startk);
2255 break; /* only one hole */
/* Polling budget for the DRAM init-complete wait loop below. */
#define TIMEOUT_LOOPS 300000
2264 #if RAMINIT_SYSINFO == 1
/* sdram_enable() - bring up DRAM on every controller.
 * Sequence: validate that some memory exists, start the memory clocks,
 * pulse reset on the DIMMs, kick off DramInit per controller (enabling
 * ChipKill when ECC + 128-bit mode allow it), poll for completion, wait for
 * the hardware memory clear on >= C0 parts, then set up the hardware memory
 * hole (>= E0) and optionally clear/cache the first CONFIG_LB_MEM_TOPK of
 * RAM.  NOTE(review): this listing is elided - several statements between
 * the numbered lines are not visible here.
 */
2265 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
2267 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2272 /* Error if I don't have memory */
2273 if (memory_end_k(ctrl, controllers) == 0) {
2274 die("No memory\r\n");
2277 /* Before enabling memory start the memory clocks */
2278 for(i = 0; i < controllers; i++) {
2280 if (!controller_present(ctrl + i))
2282 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2283 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2284 dch |= DCH_MEMCLK_VALID;
2285 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2288 /* Disable dram receivers */
2290 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2291 dcl |= DCL_DisInRcvrs;
2292 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2296 /* And if necessary toggle the reset on the dimms by hand */
2297 memreset(controllers, ctrl);
2299 /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
2301 for(i = 0; i < controllers; i++) {
2303 if (!controller_present(ctrl + i))
2305 /* Skip everything if I don't have any memory on this controller */
2306 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2307 if (!(dch & DCH_MEMCLK_VALID)) {
2311 /* Toggle DisDqsHys to get it working */
2312 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2313 if (dcl & DCL_DimmEccEn) {
2315 print_spew("ECC enabled\r\n");
2316 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
/* ChipKill additionally needs the 128-bit (dual channel) interface. */
2318 if (dcl & DCL_128BitEn) {
2319 mnc |= MNC_CHIPKILL_EN;
2321 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2323 dcl |= DCL_DisDqsHys;
2324 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2325 dcl &= ~DCL_DisDqsHys;
2326 dcl &= ~DCL_DLL_Disable;
/* Kick off hardware DRAM initialization on this controller. */
2329 dcl |= DCL_DramInit;
2330 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
/* Second pass: wait for each controller to finish initializing. */
2333 for(i = 0; i < controllers; i++) {
2335 if (!controller_present(ctrl + i))
2337 /* Skip everything if I don't have any memory on this controller */
2338 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2339 if (!(dch & DCH_MEMCLK_VALID)) {
2343 print_debug("Initializing memory: ");
/* Poll until the hardware clears DramInit, bounded by TIMEOUT_LOOPS. */
2347 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2349 if ((loops & 1023) == 0) {
2352 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2353 if (loops >= TIMEOUT_LOOPS) {
2354 print_debug(" failed\r\n");
2358 if (!is_cpu_pre_c0()) {
2359 /* Wait until it is safe to touch memory */
2360 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2361 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2363 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2364 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2367 print_debug(" done\r\n");
2370 #if HW_MEM_HOLE_SIZEK != 0
2371 // init hw mem hole here
2372 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2373 if(!is_cpu_pre_e0())
2374 set_hw_mem_hole(controllers, ctrl);
2377 //FIXME add enable node interleaving here -- yhlu
2379 1. check how many nodes we have , if not all has ram installed get out
2380 2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
2381 3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
2382 4. if all ready enable node_interleaving in f1 0x40..... of every node
2383 5. for node interleaving we need to set mem hole to every node ( need recalculate hole offset in f0 for every node)
2386 #if USE_DCACHE_RAM == 0
2387 /* Make certain the first 1M of memory is initialized */
2388 print_debug("Clearing initial memory region: ");
2390 /* Use write combine caching while we setup the first 1M */
2391 cache_lbmem(MTRR_TYPE_WRCOMB);
2393 /* clear memory 1meg */
2394 clear_memory((void *)0, CONFIG_LB_MEM_TOPK << 10);
2396 /* The first 1M is now setup, use it */
2397 cache_lbmem(MTRR_TYPE_WRBACK);
2399 print_debug(" done\r\n");
2403 #if USE_DCACHE_RAM == 1
2404 static void set_sysinfo_in_ram(unsigned val)
2408 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
2412 struct mem_controller *ctrl;
2413 for(i=0;i<controllers; i++) {
2416 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2417 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2418 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2419 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2421 if(spd_addr == (void *)0) continue;
2423 for(j=0;j<DIMM_SOCKETS;j++) {
2424 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2425 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];