1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
7 #include <cpu/x86/cache.h>
8 #include <cpu/x86/mtrr.h>
13 #if CONFIG_HAVE_OPTION_TABLE
14 #include "option_table.h"
17 #if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0
18 # error "CONFIG_RAMTOP must be a power of 2"
21 #ifndef QRANK_DIMM_SUPPORT
22 #define QRANK_DIMM_SUPPORT 0
/* Program a table of PCI config registers.  The table is consumed in
 * (PCI_ADDR, and-mask, or-value) triples: each register is read,
 * ANDed with the mask (preserving bits outside the field), ORed with
 * the new value, and written back (read-modify-write).
 */
25 void setup_resource_map(const unsigned int *register_values, int max)
28 // printk(BIOS_DEBUG, "setting up resource map....");
29 for (i = 0; i < max; i += 3) {
33 dev = register_values[i] & ~0xfff; /* bus/dev/fn encoded above bit 12 */
34 where = register_values[i] & 0xfff; /* config-space offset in low 12 bits */
35 reg = pci_read_config32(dev, where);
36 reg &= register_values[i+1]; /* keep only bits the mask allows through */
37 reg |= register_values[i+2]; /* merge in the new field values */
38 pci_write_config32(dev, where, reg);
40 // printk(BIOS_DEBUG, "done.\n");
/* Return nonzero when a K8 northbridge answers at this node: config
 * register 0 must read back vendor 0x1022 (AMD) / device 0x1100
 * (K8 HyperTransport configuration, function 0). */
43 static int controller_present(const struct mem_controller *ctrl)
45 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
/* Program the static northbridge/DRAM-controller register table for one
 * memory controller node.  The table below is consumed in
 * (PCI_ADDR, and-mask, or-value) triples; the device encoded in each
 * entry (node 0, dev 0x18) is rebased onto ctrl->f0's node before the
 * read-modify-write. */
48 #if CONFIG_RAMINIT_SYSINFO
49 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
51 static void sdram_set_registers(const struct mem_controller *ctrl)
54 static const unsigned int register_values[] = {
56 /* Careful set limit registers before base registers which
57 contain the enables */
58 /* DRAM Limit i Registers
67 * [ 2: 0] Destination Node ID
77 * [10: 8] Interleave select
78 * specifies the values of A[14:12] to use with interleave enable.
80 * [31:16] DRAM Limit Address i Bits 39-24
81 * This field defines the upper address bits of a 40 bit address
82 * that define the end of the DRAM region.
84 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
85 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
86 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
87 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
88 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
89 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
90 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
91 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
92 /* DRAM Base i Registers
101 * [ 0: 0] Read Enable
104 * [ 1: 1] Write Enable
105 * 0 = Writes Disabled
108 * [10: 8] Interleave Enable
109 * 000 = No interleave
110 * 001 = Interleave on A[12] (2 nodes)
112 * 011 = Interleave on A[12] and A[14] (4 nodes)
116 * 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
118 * [31:16] DRAM Base Address i Bits 39-24
119 * This field defines the upper address bits of a 40-bit address
120 * that define the start of the DRAM region.
122 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
123 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
124 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
125 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
126 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
127 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
128 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
129 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
131 /* DRAM CS Base Address i Registers
140 * [ 0: 0] Chip-Select Bank Enable
144 * [15: 9] Base Address (19-13)
145 * An optimization used when all DIMM are the same size...
147 * [31:21] Base Address (35-25)
148 * This field defines the top 11 addresses bit of a 40-bit
149 * address that define the memory address space. These
150 * bits decode 32-MByte blocks of memory.
152 PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
153 PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
154 PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
155 PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
156 PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
157 PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
158 PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
159 PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
160 /* DRAM CS Mask Address i Registers
169 * Select bits to exclude from comparison with the DRAM Base address register.
171 * [15: 9] Address Mask (19-13)
172 * Address to be excluded from the optimized case
174 * [29:21] Address Mask (33-25)
175 * The bits with an address mask of 1 are excluded from address comparison
179 PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
180 PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
181 PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
182 PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
183 PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
184 PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
185 PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
186 PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
187 /* DRAM Bank Address Mapping Register
189 * Specify the memory module size
194 * 000 = 32Mbyte (Rows = 12 & Col = 8)
195 * 001 = 64Mbyte (Rows = 12 & Col = 9)
196 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
197 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
198 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
199 * 101 = 1Gbyte (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
200 * 110 = 2Gbyte (Rows = 14 & Col = 12)
207 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
208 /* DRAM Timing Low Register
210 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
220 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
221 * 0000 = 7 bus clocks
222 * 0001 = 8 bus clocks
224 * 1110 = 21 bus clocks
225 * 1111 = 22 bus clocks
226 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
227 * 0000 = 9 bus clocks
228 * 0010 = 10 bus clocks
230 * 1110 = 23 bus clocks
231 * 1111 = 24 bus clocks
232 * [14:12] Trcd (Ras#-active to Cas#-read/write Delay)
242 * [18:16] Trrd (Ras# to Ras# Delay)
252 * [23:20] Tras (Minimum Ras# Active Time)
253 * 0000 to 0100 = reserved
254 * 0101 = 5 bus clocks
256 * 1111 = 15 bus clocks
257 * [26:24] Trp (Row Precharge Time)
267 * [28:28] Twr (Write Recovery Time)
272 PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
273 /* DRAM Timing High Register
275 * [ 0: 0] Twtr (Write to Read Delay)
279 * [ 6: 4] Trwt (Read to Write Delay)
289 * [12: 8] Tref (Refresh Rate)
290 * 00000 = 100Mhz 4K rows
291 * 00001 = 133Mhz 4K rows
292 * 00010 = 166Mhz 4K rows
293 * 00011 = 200Mhz 4K rows
294 * 01000 = 100Mhz 8K/16K rows
295 * 01001 = 133Mhz 8K/16K rows
296 * 01010 = 166Mhz 8K/16K rows
297 * 01011 = 200Mhz 8K/16K rows
299 * [22:20] Twcl (Write CAS Latency)
300 * 000 = 1 Mem clock after CAS# (Unbuffered Dimms)
301 * 001 = 2 Mem clocks after CAS# (Registered Dimms)
304 PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
305 /* DRAM Config Low Register
307 * [ 0: 0] DLL Disable
316 * [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
317 * 0 = Enable DQS input filter
318 * 1 = Disable DQS input filtering
321 * 0 = Initialization done or not yet started.
322 * 1 = Initiate DRAM initialization sequence
323 * [ 9: 9] SO-Dimm Enable
325 * 1 = SO-Dimms present
327 * 0 = DRAM not enabled
328 * 1 = DRAM initialized and enabled
329 * [11:11] Memory Clear Status
330 * 0 = Memory Clear function has not completed
331 * 1 = Memory Clear function has completed
332 * [12:12] Exit Self-Refresh
333 * 0 = Exit from self-refresh done or not yet started
334 * 1 = DRAM exiting from self refresh
335 * [13:13] Self-Refresh Status
336 * 0 = Normal Operation
337 * 1 = Self-refresh mode active
338 * [15:14] Read/Write Queue Bypass Count
343 * [16:16] 128-bit/64-Bit
344 * 0 = 64bit Interface to DRAM
345 * 1 = 128bit Interface to DRAM
346 * [17:17] DIMM ECC Enable
347 * 0 = Some DIMMs do not have ECC
348 * 1 = ALL DIMMS have ECC bits
349 * [18:18] UnBuffered DIMMs
351 * 1 = Unbuffered DIMMS
352 * [19:19] Enable 32-Byte Granularity
353 * 0 = Optimize for 64byte bursts
354 * 1 = Optimize for 32byte bursts
355 * [20:20] DIMM 0 is x4
356 * [21:21] DIMM 1 is x4
357 * [22:22] DIMM 2 is x4
358 * [23:23] DIMM 3 is x4
360 * 1 = x4 DIMM present
361 * [24:24] Disable DRAM Receivers
362 * 0 = Receivers enabled
363 * 1 = Receivers disabled
365 * 000 = Arbiters choice is always respected
366 * 001 = Oldest entry in DCQ can be bypassed 1 time
367 * 010 = Oldest entry in DCQ can be bypassed 2 times
368 * 011 = Oldest entry in DCQ can be bypassed 3 times
369 * 100 = Oldest entry in DCQ can be bypassed 4 times
370 * 101 = Oldest entry in DCQ can be bypassed 5 times
371 * 110 = Oldest entry in DCQ can be bypassed 6 times
372 * 111 = Oldest entry in DCQ can be bypassed 7 times
375 PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
377 (0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
378 (1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
379 (2 << 14)|(0 << 13)|(0 << 12)|
380 (0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
381 (0 << 3) |(0 << 1) |(0 << 0),
382 /* DRAM Config High Register
384 * [ 0: 3] Maximum Asynchronous Latency
389 * [11: 8] Read Preamble
407 * [18:16] Idle Cycle Limit
416 * [19:19] Dynamic Idle Cycle Center Enable
417 * 0 = Use Idle Cycle Limit
418 * 1 = Generate a dynamic Idle cycle limit
419 * [22:20] DRAM MEMCLK Frequency
429 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
430 * 0 = Disable MemClks
432 * [26:26] Memory Clock 0 Enable
435 * [27:27] Memory Clock 1 Enable
438 * [28:28] Memory Clock 2 Enable
441 * [29:29] Memory Clock 3 Enable
446 PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
447 (0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
448 (0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
449 /* DRAM Delay Line Register
451 * Adjust the skew of the input DQS strobe relative to DATA
453 * [23:16] Delay Line Adjust
454 * Adjusts the DLL derived PDL delay by one or more delay stages
455 * in either the faster or slower direction.
456 * [24:24} Adjust Slower
458 * 1 = Adj is used to increase the PDL delay
459 * [25:25] Adjust Faster
461 * 1 = Adj is used to decrease the PDL delay
464 PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
465 /* MCA NB Status Low reg */
466 PCI_ADDR(0, 0x18, 3, 0x48), 0x00f00000, 0x00000000,
467 /* MCA NB Status high reg */
468 PCI_ADDR(0, 0x18, 3, 0x4c), 0x01801e8c, 0x00000000,
469 /* MCA NB address Low reg */
470 PCI_ADDR(0, 0x18, 3, 0x50), 0x00000007, 0x00000000,
471 /* MCA NB address high reg */
472 PCI_ADDR(0, 0x18, 3, 0x54), 0xffffff00, 0x00000000,
473 /* DRAM Scrub Control Register
475 * [ 4: 0] DRAM Scrub Rate
477 * [12: 8] L2 Scrub Rate
479 * [20:16] Dcache Scrub
482 * 00000 = Do not scrub
504 * All Others = Reserved
506 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
507 /* DRAM Scrub Address Low Register
509 * [ 0: 0] DRAM Scrubber Redirect Enable
511 * 1 = Scrubber Corrects errors found in normal operation
513 * [31: 6] DRAM Scrub Address 31-6
515 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
516 /* DRAM Scrub Address High Register
518 * [ 7: 0] DRAM Scrub Address 39-32
521 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
526 if (!controller_present(ctrl)) {
527 // printk(BIOS_DEBUG, "No memory controller present\n");
530 printk(BIOS_SPEW, "setting up CPU%02x northbridge registers\n", ctrl->node_id);
531 max = ARRAY_SIZE(register_values);
532 for (i = 0; i < max; i += 3) {
536 dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0; /* rebase from node 0 to this node */
537 where = register_values[i] & 0xfff;
538 reg = pci_read_config32(dev, where);
539 reg &= register_values[i+1]; /* preserve bits outside the mask */
540 reg |= register_values[i+2];
541 pci_write_config32(dev, where, reg);
543 printk(BIOS_SPEW, "done.\n");
/* Enable DRAM ECC when the northbridge reports the NBCAP_ECC capability,
 * unless the "ECC_memory" CMOS option (default: enabled) turns it off. */
546 static void hw_enable_ecc(const struct mem_controller *ctrl)
549 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
550 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
551 dcl &= ~DCL_DimmEccEn; /* start from "ECC off" */
552 if (nbcap & NBCAP_ECC) {
553 dcl |= DCL_DimmEccEn;
555 if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
556 dcl &= ~DCL_DimmEccEn; /* user disabled ECC in CMOS */
558 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Nonzero when the DRAM controller is configured for a 128-bit
 * (dual-channel) interface. */
561 static int is_dual_channel(const struct mem_controller *ctrl)
564 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
565 return dcl & DCL_128BitEn;
/* Despite the name, this actually reports 128-bit (dual channel)
 * capability from the northbridge capability register -- see below. */
568 static int is_opteron(const struct mem_controller *ctrl)
570 /* Test to see if I am an Opteron. Socket 939 based Athlon64
571 * have dual channel capability, too, so we need a better test
573 * However, all code uses is_opteron() to find out whether to
574 * use dual channel, so if we really check for opteron here, we
575 * need to fix up all code using this function, too.
578 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
579 return !!(nbcap & NBCAP_128Bit);
/* Nonzero when the controller was configured for registered DIMMs. */
582 static int is_registered(const struct mem_controller *ctrl)
584 /* Test to see if we are dealing with registered SDRAM.
585 * If we are not registered we are unbuffered.
586 * This function must be called after spd_handle_unbuffered_dimms.
589 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
590 return !(dcl & DCL_UnBuffDimm);
598 #if QRANK_DIMM_SUPPORT == 1
/* Decode a DIMM's SPD into log2 sizes in bits for each side (rows +
 * columns + log2(banks) + log2(data width)).  On an SPD read failure
 * or bad SPD contents the error paths below report "no memory". */
603 static struct dimm_size spd_get_dimm_size(unsigned device)
605 /* Calculate the log base 2 size of a DIMM in bits */
612 #if QRANK_DIMM_SUPPORT == 1
616 /* Note it might be easier to use byte 31 here, it has the DIMM size as
617 * a multiple of 4MB. The way we do it now we can size both
618 * sides of an asymmetric dimm.
620 value = spd_read_byte(device, 3); /* rows */
621 if (value < 0) goto hw_err;
622 if ((value & 0xf) == 0) goto val_err;
623 sz.side1 += value & 0xf;
624 sz.rows = value & 0xf;
626 value = spd_read_byte(device, 4); /* columns */
627 if (value < 0) goto hw_err;
628 if ((value & 0xf) == 0) goto val_err;
629 sz.side1 += value & 0xf;
630 sz.col = value & 0xf;
632 value = spd_read_byte(device, 17); /* banks */
633 if (value < 0) goto hw_err;
634 if ((value & 0xff) == 0) goto val_err;
635 sz.side1 += log2(value & 0xff);
637 /* Get the module data width and convert it to a power of two */
638 value = spd_read_byte(device, 7); /* (high byte) */
639 if (value < 0) goto hw_err;
643 low = spd_read_byte(device, 6); /* (low byte) */
644 if (low < 0) goto hw_err;
645 value = value | (low & 0xff);
646 if ((value != 72) && (value != 64)) goto val_err; /* 72 = 64-bit + ECC */
647 sz.side1 += log2(value);
650 value = spd_read_byte(device, 5); /* number of physical banks */
651 if (value < 0) goto hw_err;
652 if (value == 1) goto out; /* single-sided: side2 stays empty */
653 if ((value != 2) && (value != 4 )) {
656 #if QRANK_DIMM_SUPPORT == 1
660 /* Start with the symmetrical case */
663 value = spd_read_byte(device, 3); /* rows */
664 if (value < 0) goto hw_err;
665 if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
666 sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
667 sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */
669 value = spd_read_byte(device, 4); /* columns */
670 if (value < 0) goto hw_err;
671 if ((value & 0xff) == 0) goto val_err;
672 sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
673 sz.side2 += ((value >> 4) & 0x0f); /* Add in columns on side 2 */
678 die("Bad SPD value\n");
679 /* If an hw_error occurs report that I have no memory */
685 #if QRANK_DIMM_SUPPORT == 1
/* Program the chip-select base registers for DIMM `index` from the
 * decoded SPD size (sz.side1/side2 are log2 sizes in bits), doubling
 * the size for dual-channel operation, then gate this DIMM's memory
 * clock(s) on in DRAM_CONFIG_HIGH. */
693 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
695 uint32_t base0, base1;
698 if (sz.side1 != sz.side2) {
702 /* For each base register.
703 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
704 * The initialize dimm size is in bits.
705 * Set the base enable bit0.
710 /* Make certain side1 of the dimm is at least 32MB */
711 if (sz.side1 >= (25 +3)) {
712 base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
715 /* Make certain side2 of the dimm is at least 32MB */
716 if (sz.side2 >= (25 + 3)) {
717 base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
720 /* Double the size if we are using dual channel memory */
721 if (is_dual_channel(ctrl)) {
722 base0 = (base0 << 1) | (base0 & 1); /* shift size, keep enable bit */
723 base1 = (base1 << 1) | (base1 & 1);
726 /* Clear the reserved bits */
727 base0 &= ~0x001ffffe;
728 base1 &= ~0x001ffffe;
730 /* Set the appropriate DIMM base address register */
731 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
732 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
733 #if QRANK_DIMM_SUPPORT == 1
735 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
736 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
740 /* Enable the memory clocks for this DIMM */
742 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
743 dch |= DCH_MEMCLK_EN0 << index;
744 #if QRANK_DIMM_SUPPORT == 1
746 dch |= DCH_MEMCLK_EN0 << (index + 2);
749 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the DRAM Bank Address Mapping nibble for DIMM `index`:
 * pre-D0 cores derive it from the raw log2 size, later cores look up
 * the (rows, columns) geometry in cs_map_aa. */
753 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
755 static const unsigned cs_map_aa[] = {
756 /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
764 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
765 map &= ~(0xf << (index * 4)); /* clear this DIMM's nibble first */
766 #if QRANK_DIMM_SUPPORT == 1
768 map &= ~(0xf << ( (index + 2) * 4));
773 /* Make certain side1 of the dimm is at least 32MB */
774 if (sz.side1 >= (25 +3)) {
775 if (is_cpu_pre_d0()) {
776 map |= (sz.side1 - (25 + 3)) << (index *4);
777 #if QRANK_DIMM_SUPPORT == 1
779 map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
784 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
785 #if QRANK_DIMM_SUPPORT == 1
787 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
793 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/* Size every populated DIMM via SPD and program its chip-select base
 * and bank-map registers.  Returns the dimm_mask, or -1 on SPD error. */
797 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
801 for (i = 0; i < DIMM_SOCKETS; i++) {
803 if (!(dimm_mask & (1 << i))) {
806 sz = spd_get_dimm_size(ctrl->channel0[i]);
808 return -1; /* Report SPD error */
810 set_dimm_size(ctrl, sz, i);
811 set_dimm_map (ctrl, sz, i);
/* Write this node's DRAM base/limit routing pair into the address-map
 * function (function 1) of every node, so all nodes route the range
 * [base_k, limit_k) to ctrl->node_id. */
816 static void route_dram_accesses(const struct mem_controller *ctrl,
817 unsigned long base_k, unsigned long limit_k)
819 /* Route the addresses to the controller node */
824 unsigned limit_reg, base_reg;
827 node_id = ctrl->node_id;
828 index = (node_id << 3); /* each node's base/limit pair is 8 bytes apart */
829 limit = (limit_k << 2); /* KB -> register's 39:24 address field */
832 limit |= ( 0 << 8) | (node_id << 0); /* destination node id */
833 base = (base_k << 2);
835 base |= (0 << 8) | (1<<1) | (1<<0); /* write enable | read enable */
837 limit_reg = 0x44 + index;
838 base_reg = 0x40 + index;
839 for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
840 pci_write_config32(device, limit_reg, limit); /* limit first: it holds no enable bits */
841 pci_write_config32(device, base_reg, base);
/* Program the TOP_MEM (and, above 4GB, TOP_MEM2) MSRs from the total
 * memory size.  tom_k is in KB, so 4*1024*1024 is the 4GB boundary;
 * memory below 4GB is capped to leave an I/O/ROM hole. */
845 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
847 /* Error if I don't have memory */
852 /* Report the amount of memory. */
853 printk(BIOS_DEBUG, "RAM end at 0x%08x kB\n", tom_k);
855 /* Now set top of memory */
857 if (tom_k > (4*1024*1024)) {
858 printk(BIOS_SPEW, "Handling memory mapped above 4 GB\n");
859 printk(BIOS_SPEW, "Upper RAM end at 0x%08x kB\n", tom_k);
860 msr.lo = (tom_k & 0x003fffff) << 10; /* KB -> bytes, low 32 bits */
861 msr.hi = (tom_k & 0xffc00000) >> 22; /* KB -> bytes, high 32 bits */
862 wrmsr(TOP_MEM2, msr);
863 printk(BIOS_SPEW, "Correcting memory amount mapped below 4 GB\n");
866 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
867 * so I can see my rom chip and other I/O devices.
869 if (tom_k >= 0x003f0000) {
870 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
871 if (hole_startk != 0) {
876 printk(BIOS_SPEW, "Adjusting lower RAM end\n");
878 printk(BIOS_SPEW, "Lower RAM end at 0x%08x kB\n", tom_k);
879 msr.lo = (tom_k & 0x003fffff) << 10;
880 msr.hi = (tom_k & 0xffc00000) >> 22;
/* Interleave the enabled chip selects when they are all the same size
 * and cs_mode.  On success the CS base/mask registers are rewritten and
 * the total memory size in KB is returned; on any mismatch interleaving
 * is abandoned (the missing early-return paths return 0). */
884 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
887 static const uint8_t csbase_low_shift[] = {
890 /* 128MB */ (14 - 4),
891 /* 256MB */ (15 - 4),
892 /* 512MB */ (15 - 4),
897 static const uint8_t csbase_low_d0_shift[] = {
900 /* 128MB */ (14 - 4),
901 /* 128MB */ (15 - 4),
902 /* 256MB */ (15 - 4),
903 /* 512MB */ (15 - 4),
904 /* 256MB */ (16 - 4),
905 /* 512MB */ (16 - 4),
911 /* cs_base_high is not changed */
914 int chip_selects, index;
916 unsigned common_size;
917 unsigned common_cs_mode;
918 uint32_t csbase, csmask;
920 /* See if all of the memory chip selects are the same size
921 * and if so count them.
926 for (index = 0; index < 8; index++) {
931 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
939 if (common_size == 0) {
942 /* The size differed fail */
943 if (common_size != size) {
947 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
948 cs_mode =( value >> ((index>>1)*4)) & 0xf; /* one map nibble per DIMM (CS pair) */
949 if (cs_mode == 0 ) continue;
950 if (common_cs_mode == 0) {
951 common_cs_mode = cs_mode;
953 /* The cs_mode differed fail */
954 if (common_cs_mode != cs_mode) {
959 /* Chip selects can only be interleaved when there is
960 * more than one and there is a power of two of them.
962 bits = log2(chip_selects);
963 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
967 /* Find the bits of csbase that we need to interleave on */
968 if (is_cpu_pre_d0()){
969 csbase_inc = 1 << csbase_low_shift[common_cs_mode];
970 if (is_dual_channel(ctrl)) {
971 /* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
972 if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
973 // printk(BIOS_DEBUG, "8 4GB chip selects cannot be interleaved\n");
980 csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
981 if (is_dual_channel(ctrl)) {
982 if ( (bits==3) && (common_cs_mode > 8)) {
983 // printk(BIOS_DEBUG, "8 cs_mode>8 chip selects cannot be interleaved\n");
990 /* Compute the initial values for csbase and csmask.
991 * In csbase just set the enable bit and the base to zero.
992 * In csmask set the mask bits for the size and page level interleave.
995 csmask = (((common_size << bits) - 1) << 21);
996 csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
997 for (index = 0; index < 8; index++) {
1000 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1001 /* Is it enabled? */
1005 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1006 pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
1007 csbase += csbase_inc; /* next CS starts one interleave stride later */
1010 printk(BIOS_SPEW, "Interleaved\n");
1012 /* Return the memory size in K */
1013 return common_size << (15 + bits);
/* Lay out the enabled chip selects contiguously, largest first, by
 * rewriting each CS base/mask pair.  Returns total memory size in KB. */
1016 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1020 /* Remember which registers we have used in the high 8 bits of tom */
1023 /* Find the largest remaining candidate */
1024 unsigned index, candidate;
1025 uint32_t csbase, csmask;
1029 for (index = 0; index < 8; index++) {
1031 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1033 /* Is it enabled? */
1038 /* Is it greater? */
1039 if (value <= csbase) {
1043 /* Has it already been selected */
1044 if (tom & (1 << (index + 24))) {
1047 /* I have a new candidate */
1052 /* See if I have found a new candidate */
1057 /* Remember the dimm size */
1058 size = csbase >> 21; /* size field lives in bits 31:21 */
1060 /* Remember I have used this register */
1061 tom |= (1 << (candidate + 24));
1063 /* Recompute the cs base register value */
1064 csbase = (tom << 21) | 1; /* running total becomes the new base; bit0 enables */
1066 /* Increment the top of memory */
1069 /* Compute the memory mask */
1070 csmask = ((size -1) << 21);
1071 csmask |= 0xfe00; /* For now don't optimize */
1073 /* Write the new base register */
1074 pci_write_config32(ctrl->f2, DRAM_CSBASE + (candidate << 2), csbase);
1075 /* Write the new mask register */
1076 pci_write_config32(ctrl->f2, DRAM_CSMASK + (candidate << 2), csmask);
1079 /* Return the memory size in K */
1080 return (tom & ~0xff000000) << 15;
/* Scan the DRAM base/limit routing pairs for nodes [0, max_node_id)
 * and return the highest mapped DRAM address, in KB. */
1083 static unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1087 /* Find the last memory address used */
1089 for (node_id = 0; node_id < max_node_id; node_id++) {
1090 uint32_t limit, base;
1092 index = node_id << 3;
1093 base = pci_read_config32(ctrl->f1, 0x40 + index);
1094 /* Only look at the limit if the base is enabled */
1095 if ((base & 3) == 3) { /* both read- and write-enable set */
1096 limit = pci_read_config32(ctrl->f1, 0x44 + index);
1097 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
/* Arrange this node's chip selects (interleaved when the CMOS option
 * allows and the DIMMs qualify, otherwise ordered by size), then hook
 * the node's memory range into the system map and set top-of-memory. */
1103 static void order_dimms(const struct mem_controller *ctrl)
1105 unsigned long tom_k, base_k;
1107 if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1108 tom_k = interleave_chip_selects(ctrl);
1110 printk(BIOS_DEBUG, "Interleaving disabled\n");
1115 tom_k = order_chip_selects(ctrl);
1118 /* Compute the memory base address */
1119 base_k = memory_end_k(ctrl, ctrl->node_id); /* end of the previous nodes' memory */
1121 route_dram_accesses(ctrl, base_k, tom_k);
1122 set_top_mem(tom_k, 0);
/* Disable DIMM `index` by zeroing both of its chip-select base
 * registers and return dimm_mask with its bit cleared. */
1125 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1127 printk(BIOS_DEBUG, "disabling dimm %02x\n", index);
1128 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1129 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1130 dimm_mask &= ~(1 << index);
/* Classify the populated DIMMs as registered or unbuffered via SPD
 * byte 21 and program DCL_UnBuffDimm accordingly.  Mixing the two
 * types is fatal. */
1134 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
1140 int has_dualch = is_opteron(ctrl);
1144 for (i = 0; (i < DIMM_SOCKETS); i++) {
1146 if (!(dimm_mask & (1 << i))) {
1149 value = spd_read_byte(ctrl->channel0[i], 21); /* SDRAM module attributes */
1154 /* Registered dimm ? */
1155 if (value & (1 << 1)) {
1158 /* Otherwise it must be an unbuffered dimm */
1163 if (unbuffered && registered) {
1164 die("Mixed buffered and registered dimms not supported");
1167 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1168 dcl &= ~DCL_UnBuffDimm;
1170 if ((has_dualch) && (!is_cpu_pre_d0())) {
1171 dcl |= DCL_UnBuffDimm; /* set DCL_DualDIMMen too? */
1173 /* set DCL_En2T if you have non-equal DDR mem types! */
1175 if ((cpuid_eax(1) & 0x30) == 0x30) {
1176 /* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
1177 dcl |= DCL_UpperCSMap;
1180 dcl |= DCL_UnBuffDimm;
1183 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1185 if (is_registered(ctrl)) {
1186 printk(BIOS_SPEW, "Registered\n");
1188 printk(BIOS_SPEW, "Unbuffered\n");
/* Probe SPD byte 2 (memory type) on both channels of every socket and
 * return a bitmask of present DIMMs: channel0 in the low DIMM_SOCKETS
 * bits, channel1 in the next DIMM_SOCKETS bits. */
1194 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1199 for (i = 0; i < DIMM_SOCKETS; i++) {
1202 device = ctrl->channel0[i];
1204 byte = spd_read_byte(ctrl->channel0[i], 2); /* Type */
1206 dimm_mask |= (1 << i);
1209 device = ctrl->channel1[i];
1211 byte = spd_read_byte(ctrl->channel1[i], 2);
1213 dimm_mask |= (1 << (i + DIMM_SOCKETS));
/* Enable 128-bit (dual channel) mode when every populated socket has a
 * matched pair of DIMMs (identical timing-relevant SPD bytes) and the
 * northbridge is 128-bit capable.  Falls back to single channel
 * otherwise; on success the channel1 bits are cleared from dimm_mask. */
1220 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
1224 /* SPD addresses to verify are identical */
1225 static const uint8_t addresses[] = {
1226 2, /* Type should be DDR SDRAM */
1227 3, /* *Row addresses */
1228 4, /* *Column addresses */
1229 5, /* *Physical Banks */
1230 6, /* *Module Data Width low */
1231 7, /* *Module Data Width high */
1232 9, /* *Cycle time at highest CAS Latency CL=X */
1233 11, /* *SDRAM Type */
1234 13, /* *SDRAM Width */
1235 17, /* *Logical Banks */
1236 18, /* *Supported CAS Latencies */
1237 21, /* *SDRAM Module Attributes */
1238 23, /* *Cycle time at CAS Latency (CLX - 0.5) */
1239 26, /* *Cycle time at CAS Latency (CLX - 1.0) */
1240 27, /* *tRP Row precharge time */
1241 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1242 29, /* *tRCD RAS to CAS */
1243 30, /* *tRAS Activate to Precharge */
1244 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1245 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1247 /* If the dimms are not in pairs do not do dual channels */
1248 if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1249 ((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1250 goto single_channel;
1252 /* If the cpu is not capable of doing dual channels don't do dual channels */
1253 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1254 if (!(nbcap & NBCAP_128Bit)) {
1255 goto single_channel;
1257 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1258 unsigned device0, device1;
1261 /* If I don't have a dimm skip this one */
1262 if (!(dimm_mask & (1 << i))) {
1265 device0 = ctrl->channel0[i];
1266 device1 = ctrl->channel1[i];
1267 for (j = 0; j < ARRAY_SIZE(addresses); j++) {
1269 addr = addresses[j];
1270 value0 = spd_read_byte(device0, addr);
1274 value1 = spd_read_byte(device1, addr);
1278 if (value0 != value1) {
1279 goto single_channel; /* pair mismatch: cannot run 128-bit */
1283 printk(BIOS_SPEW, "Enabling dual channel memory\n");
1285 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1286 dcl &= ~DCL_32ByteEn;
1287 dcl |= DCL_128BitEn;
1288 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1291 dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS)); /* drop channel1 bits */
1297 uint8_t divisor; /* In 1/2 ns increments */
1300 uint32_t dch_memclk; /* DRAM Config High MEMCLK frequency field, pre-shifted */
1301 uint16_t dch_tref4k, dch_tref8k; /* Tref refresh-rate codes for 4K / 8K-row parts */
1304 uint8_t dtl_trwt[3][3]; /* first index is CAS_LAT 2/2.5/3 and 128/registered64/64 */
1305 uint8_t rdpreamble[4]; /* 0 is for registered, 1 for 1-2 DIMMS, 2 and 3 for 3 or 4 unreg dimm slots */
/* Select the memory-parameter set (100/133/166/200 MHz) whose cycle
 * time accommodates min_cycle_time (units: SPD byte 9 format, i.e.
 * high nibble = ns, low nibble = tenths of ns).  Dies if no entry fits. */
1309 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1311 static const struct mem_param speed[] = {
1315 .divisor = (10 <<1),
1318 .dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1319 .dch_tref4k = DTH_TREF_100MHZ_4K,
1320 .dch_tref8k = DTH_TREF_100MHZ_8K,
1323 .dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1324 .rdpreamble = { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
1329 .divisor = (7<<1)+1, /* 7.5ns in half-ns units */
1332 .dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1333 .dch_tref4k = DTH_TREF_133MHZ_4K,
1334 .dch_tref8k = DTH_TREF_133MHZ_8K,
1337 .dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1338 .rdpreamble = { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
1346 .dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1347 .dch_tref4k = DTH_TREF_166MHZ_4K,
1348 .dch_tref8k = DTH_TREF_166MHZ_8K,
1351 .dtl_trwt = { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
1352 .rdpreamble = { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
1360 .dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1361 .dch_tref4k = DTH_TREF_200MHZ_4K,
1362 .dch_tref8k = DTH_TREF_200MHZ_8K,
1365 .dtl_trwt = { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1366 .rdpreamble = { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
1372 const struct mem_param *param;
1373 for (param = &speed[0]; param->cycle_time ; param++) {
1374 if (min_cycle_time > (param+1)->cycle_time) {
1378 if (!param->cycle_time) {
1379 die("min_cycle_time to low");
1381 printk(BIOS_SPEW, "%s\n", param->name);
/* Result bundle for spd_set_memclk(): the chosen parameter set plus
 * (per the fields declared beyond this excerpt) the surviving dimm_mask. */
1385 struct spd_set_memclk_result {
1386 const struct mem_param *param;
/* Pick the memory clock and CAS latency: find the fastest cycle time
 * allowed by the northbridge, the CMOS max_mem_clock option and every
 * DIMM's SPD, then disable any DIMM that cannot run the chosen
 * clock/latency pair.  (Function continues beyond this excerpt.) */
1389 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1391 /* Compute the minimum cycle time for these dimms */
1392 struct spd_set_memclk_result result;
1393 unsigned min_cycle_time, min_latency, bios_cycle_time;
1397 static const uint8_t latency_indicies[] = { 26, 23, 9 }; /* SPD cycle-time bytes for CL-1.0, CL-0.5, CL */
1398 static const unsigned char min_cycle_times[] = {
1399 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1400 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1401 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1402 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
1405 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1407 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1408 bios_cycle_time = min_cycle_times[
1409 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1410 if (bios_cycle_time > min_cycle_time) {
1411 min_cycle_time = bios_cycle_time; /* CMOS option may only slow us down */
1415 /* Compute the least latency with the fastest clock supported
1416 * by both the memory controller and the dimms.
1418 for (i = 0; i < DIMM_SOCKETS; i++) {
1419 int new_cycle_time, new_latency;
1424 if (!(dimm_mask & (1 << i))) {
1428 /* First find the supported CAS latencies
1429 * Byte 18 for DDR SDRAM is interpreted:
1430 * bit 0 == CAS Latency = 1.0
1431 * bit 1 == CAS Latency = 1.5
1432 * bit 2 == CAS Latency = 2.0
1433 * bit 3 == CAS Latency = 2.5
1434 * bit 4 == CAS Latency = 3.0
1435 * bit 5 == CAS Latency = 3.5
1439 new_cycle_time = 0xa0; /* worst case: 10ns */
1442 latencies = spd_read_byte(ctrl->channel0[i], 18);
1443 if (latencies <= 0) continue;
1445 /* Compute the lowest cas latency supported */
1446 latency = log2(latencies) -2;
1448 /* Loop through and find a fast clock with a low latency */
1449 for (index = 0; index < 3; index++, latency++) {
1451 if ((latency < 2) || (latency > 4) ||
1452 (!(latencies & (1 << latency)))) {
1455 spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1456 if (spd_value < 0) {
1460 /* Only increase the latency if we decrease the clock */
1461 if ((spd_value >= min_cycle_time) && (spd_value < new_cycle_time)) {
1462 new_cycle_time = spd_value;
1463 new_latency = latency;
1466 if (new_latency > 4){
1469 /* Does min_cycle_time need to be increased? */
1470 if (new_cycle_time > min_cycle_time) {
1471 min_cycle_time = new_cycle_time;
1473 /* Does min_latency need to be increased? */
1474 if (new_latency > min_latency) {
1475 min_latency = new_latency;
1478 /* Make a second pass through the dimms and disable
1479 * any that cannot support the selected memclk and cas latency.
1482 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1487 if (!(dimm_mask & (1 << i))) {
1491 latencies = spd_read_byte(ctrl->channel0[i], 18);
1492 if (latencies < 0) goto hw_error;
1493 if (latencies == 0) {
1497 /* Compute the lowest cas latency supported */
1498 latency = log2(latencies) -2;
1500 /* Walk through searching for the selected latency */
1501 for (index = 0; index < 3; index++, latency++) {
1502 if (!(latencies & (1 << latency))) {
1505 if (latency == min_latency)
1508 /* If I can't find the latency or my index is bad error */
1509 if ((latency != min_latency) || (index >= 3)) {
1513 /* Read the min_cycle_time for this latency */
1514 spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1515 if (spd_value < 0) goto hw_error;
1517 /* All is good if the selected clock speed
1518 * is what I need or slower.
1520 if (spd_value <= min_cycle_time) {
1523 /* Otherwise I have an error, disable the dimm */
1525 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1528 //down speed for full load 4 rank support
1529 #if QRANK_DIMM_SUPPORT
1530 if (dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1532 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1534 if (!(dimm_mask & (1 << i))) {
1537 val = spd_read_byte(ctrl->channel0[i], 5);
1544 if (min_cycle_time <= 0x50 ) {
1545 min_cycle_time = 0x60; /* cap fully-loaded quad-rank configs at 166MHz */
1552 /* Now that I know the minimum cycle time lookup the memory parameters */
1553 result.param = get_mem_param(min_cycle_time);
1555 /* Update DRAM Config High with our selected memory speed */
1556 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1557 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1559 /* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
1560 if (!is_cpu_pre_e0()) {
1561 if (min_cycle_time==0x50) {
1567 value |= result.param->dch_memclk;
1568 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1570 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1572 /* Update DRAM Timing Low with our selected cas latency */
1573 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1574 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1575 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1576 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1578 result.dimm_mask = dimm_mask;
1581 result.param = (const struct mem_param *)0;
1582 result.dimm_mask = -1;
/* Program tRC for DIMM i into DRAM Timing Low.  Reads the DIMM's minimum
 * tRC from SPD, converts nanoseconds to MEMCLK cycles (rounding up),
 * clamps to the register minimum, and only ever widens the currently
 * programmed value so the slowest DIMM wins.  Returns -1 on SPD error.
 * NOTE(review): listing elided — the 0/0xff fallback branch, the
 * over-maximum branch and the final return are not visible here. */
1587 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1589 unsigned clocks, old_clocks;
/* SPD byte 41: minimum active-to-active/auto-refresh time tRC, in ns. */
1592 value = spd_read_byte(ctrl->channel0[i], 41);
1593 if (value < 0) return -1;
1594 if ((value == 0) || (value == 0xff)) {
/* ceil conversion; param->divisor appears to be the MEMCLK period in
 * 0.5 ns units, so clocks = ceil(2*ns / divisor) — TODO confirm against
 * struct mem_param. */
1597 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1598 if (clocks < DTL_TRC_MIN) {
1599 clocks = DTL_TRC_MIN;
1601 if (clocks > DTL_TRC_MAX) {
/* Never shrink a value already programmed for an earlier DIMM. */
1605 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1606 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1607 if (old_clocks > clocks) {
1608 clocks = old_clocks;
1610 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1611 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1612 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRFC for DIMM i into DRAM Timing Low.  Same scheme as
 * update_dimm_Trc() — ceil ns->clocks conversion, clamp to register
 * minimum, widen-only merge — but falls back to the parameter table's
 * tRFC when the SPD byte is blank (0 or 0xff).  Returns -1 on SPD error.
 * NOTE(review): listing elided — over-maximum branch and final return
 * are not visible here. */
1616 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1618 unsigned clocks, old_clocks;
/* SPD byte 42: minimum auto-refresh to active/auto-refresh time tRFC, in ns. */
1621 value = spd_read_byte(ctrl->channel0[i], 42);
1622 if (value < 0) return -1;
1623 if ((value == 0) || (value == 0xff)) {
/* SPD field unprogrammed: use the conservative default for this clock. */
1624 value = param->tRFC;
1626 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1627 if (clocks < DTL_TRFC_MIN) {
1628 clocks = DTL_TRFC_MIN;
1630 if (clocks > DTL_TRFC_MAX) {
/* Keep the widest tRFC seen so far across DIMMs. */
1633 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1634 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
1635 if (old_clocks > clocks) {
1636 clocks = old_clocks;
1638 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1639 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1640 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRCD for DIMM i into DRAM Timing Low.  SPD byte 29 is in units
 * of 0.25 ns, hence the (divisor << 1) scaling compared to the whole-ns
 * conversions above.  Widen-only merge as in the other update_dimm_T*
 * helpers.  Returns -1 on SPD error.
 * NOTE(review): listing elided — over-maximum branch and final return
 * are not visible here. */
1645 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1647 unsigned clocks, old_clocks;
/* SPD byte 29: minimum RAS-to-CAS delay tRCD, in quarter-ns. */
1650 value = spd_read_byte(ctrl->channel0[i], 29);
1651 if (value < 0) return -1;
1652 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1653 if (clocks < DTL_TRCD_MIN) {
1654 clocks = DTL_TRCD_MIN;
1656 if (clocks > DTL_TRCD_MAX) {
/* Keep the widest tRCD seen so far across DIMMs. */
1659 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1660 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
1661 if (old_clocks > clocks) {
1662 clocks = old_clocks;
1664 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1665 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1666 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRRD for DIMM i into DRAM Timing Low.  SPD byte 28 is in units
 * of 0.25 ns (same scaling as tRCD).  Widen-only merge.  Returns -1 on
 * SPD error.
 * NOTE(review): listing elided — over-maximum branch and final return
 * are not visible here. */
1670 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1672 unsigned clocks, old_clocks;
/* SPD byte 28: minimum row-active-to-row-active delay tRRD, in quarter-ns. */
1675 value = spd_read_byte(ctrl->channel0[i], 28);
1676 if (value < 0) return -1;
1677 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1678 if (clocks < DTL_TRRD_MIN) {
1679 clocks = DTL_TRRD_MIN;
1681 if (clocks > DTL_TRRD_MAX) {
/* Keep the widest tRRD seen so far across DIMMs. */
1684 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1685 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
1686 if (old_clocks > clocks) {
1687 clocks = old_clocks;
1689 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1690 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1691 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRAS for DIMM i into DRAM Timing Low.  SPD byte 30 is in whole
 * ns (same scaling as tRC/tRFC).  Widen-only merge.  Returns -1 on SPD
 * error.
 * NOTE(review): listing elided — over-maximum branch and final return
 * are not visible here. */
1695 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1697 unsigned clocks, old_clocks;
/* SPD byte 30: minimum active-to-precharge time tRAS, in ns. */
1700 value = spd_read_byte(ctrl->channel0[i], 30);
1701 if (value < 0) return -1;
1702 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1703 if (clocks < DTL_TRAS_MIN) {
1704 clocks = DTL_TRAS_MIN;
1706 if (clocks > DTL_TRAS_MAX) {
/* Keep the widest tRAS seen so far across DIMMs. */
1709 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1710 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1711 if (old_clocks > clocks) {
1712 clocks = old_clocks;
1714 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1715 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1716 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRP for DIMM i into DRAM Timing Low.  SPD byte 27 is in units
 * of 0.25 ns (same scaling as tRCD/tRRD).  Widen-only merge.  Returns -1
 * on SPD error.
 * NOTE(review): listing elided — over-maximum branch and final return
 * are not visible here. */
1720 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1722 unsigned clocks, old_clocks;
/* SPD byte 27: minimum row-precharge time tRP, in quarter-ns. */
1725 value = spd_read_byte(ctrl->channel0[i], 27);
1726 if (value < 0) return -1;
1727 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1728 if (clocks < DTL_TRP_MIN) {
1729 clocks = DTL_TRP_MIN;
1731 if (clocks > DTL_TRP_MAX) {
/* Keep the widest tRP seen so far across DIMMs. */
1734 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1735 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
1736 if (old_clocks > clocks) {
1737 clocks = old_clocks;
1739 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1740 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1741 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program the write-recovery time Twr into DRAM Timing Low straight from
 * the parameter table (no per-DIMM SPD input). */
1745 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1748 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1749 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1750 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1751 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Seed the refresh rate (Tref) in DRAM Timing High with the parameter
 * table's 4K-row value; update_dimm_Tref() may later relax it per DIMM. */
1755 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1758 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1759 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1760 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1761 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Select the refresh rate (4K- vs 8K-row Tref) for DIMM i and merge it
 * into DRAM Timing High.  SPD byte 3 is the number of row address bits;
 * presumably 12 rows => 4K refresh, more rows => 8K — TODO confirm, the
 * selecting condition between lines 1770 and 1773 is elided from this
 * listing.  A previously programmed 4K setting is kept (4K is the more
 * frequent, safer refresh).  Returns -1 on SPD error. */
1764 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1768 unsigned tref, old_tref;
/* SPD byte 3: number of row addresses. */
1769 value = spd_read_byte(ctrl->channel0[i], 3);
1770 if (value < 0) return -1;
1773 tref = param->dch_tref8k;
1775 tref = param->dch_tref4k;
/* Merge with what an earlier DIMM selected: once any DIMM needs the 4K
 * rate, keep it; otherwise allow 8K. */
1778 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1779 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
1780 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1781 tref = param->dch_tref4k;
1783 tref = param->dch_tref8k;
1785 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1786 dth |= (tref << DTH_TREF_SHIFT);
1787 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Mark DIMM i as x4-organized in DRAM Config Low when its SPD says so.
 * SPD byte 13 is the primary SDRAM device width; with quad-rank support
 * enabled, SPD byte 5 (number of physical banks/ranks) decides whether
 * the second chip-select pair's x4 bit must be set too.
 * NOTE(review): listing elided — the width test, the set/clear of dcl
 * and the return are not visible here. */
1792 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1796 #if QRANK_DIMM_SUPPORT == 1
/* SPD byte 13: primary SDRAM device width (x4 / x8 / ...). */
1800 value = spd_read_byte(ctrl->channel0[i], 13);
1805 #if QRANK_DIMM_SUPPORT == 1
1806 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
/* Per-DIMM x4 enable bit in DRAM Config Low. */
1812 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1813 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank DIMMs occupy two chip-select pairs: flag the upper pair too. */
1815 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1818 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1823 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Check DIMM i's ECC capability (SPD byte 11 = module error-check type)
 * and clear DimmEccEn in DRAM Config Low when this DIMM cannot do ECC —
 * ECC stays enabled only if every DIMM supports it.
 * NOTE(review): listing elided — the SPD-value test guarding the clear,
 * and the return, are not visible here. */
1827 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1831 value = spd_read_byte(ctrl->channel0[i], 11);
1836 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1837 dcl &= ~DCL_DimmEccEn;
1838 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count populated DIMMs on this controller by scanning the even
 * chip-select base registers (one CS pair per DIMM, hence index += 2)
 * for the enable bit.
 * NOTE(review): listing elided — the enable test, the counter increment
 * and the return are not visible here. */
1843 static int count_dimms(const struct mem_controller *ctrl)
1848 for (index = 0; index < 8; index += 2) {
1850 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
/* Program the write-to-read delay Twtr into DRAM Timing High straight
 * from the parameter table. */
1858 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1862 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1863 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1864 dth |= ((param->dtl_twtr - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1865 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the read-to-write turnaround Trwt into DRAM Timing High.
 * The value comes from a 2-D table in the parameter set indexed by the
 * currently programmed CAS latency and the memory topology (dual
 * channel / registered / unbuffered).
 * NOTE(review): listing elided — the mapping from the raw Tcl field to
 * the 'lat' table index is not visible here. */
1868 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
/* Read back the CAS latency selected earlier by spd_set_memclk(). */
1876 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1877 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1879 if (is_opteron(ctrl)) {
1880 mtype = 0; /* dual channel */
1881 } else if (is_registered(ctrl)) {
1882 mtype = 1; /* registered 64bit interface */
1884 mtype = 2; /* unbuffered 64bit interface */
1898 die("Unknown LAT for Trwt");
1901 clocks = param->dtl_trwt[lat][mtype];
/* Sanity-check the table entry before trusting it. */
1902 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1903 die("Unknown Trwt\n");
1906 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1907 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1908 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1909 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the write CAS latency Twcl into DRAM Timing High.  Registered
 * DIMMs need a different value than unbuffered ones.
 * NOTE(review): listing elided — the two 'clocks = ...' assignments are
 * not visible here. */
1913 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1915 /* Memory Clocks after CAS# */
1918 if (is_registered(ctrl)) {
1923 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1924 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1925 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1926 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the read preamble into DRAM Config High.  The correct value
 * depends on bus loading, so the number of physically present DIMM
 * sockets is counted and mapped to an index into param->rdpreamble[].
 * NOTE(review): listing elided — the slot counter increment and the
 * 'i = ...' index assignments in each branch are not visible here. */
1930 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1933 unsigned rdpreamble;
/* Count sockets wired to this controller (channel0 entries non-zero). */
1938 for (i = 0; i < 4; i++) {
1939 if (ctrl->channel0[i]) {
1944 /* map to index to param.rdpreamble array */
1945 if (is_registered(ctrl)) {
1947 } else if (slots < 3) {
1949 } else if (slots == 3) {
1951 } else if (slots == 4) {
1954 die("Unknown rdpreamble for this nr of slots");
1957 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1958 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
1959 rdpreamble = param->rdpreamble[i];
/* Sanity-check the table entry before programming it. */
1961 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
1962 die("Unknown rdpreamble");
1965 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
1966 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the maximum asynchronous latency into DRAM Config High based
 * on how many DIMMs are installed (more DIMMs = more loading = higher
 * latency) and whether the DIMMs are registered.
 * NOTE(review): listing elided — the async_lat assignments per dimm
 * count are not visible here. */
1969 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
1975 dimms = count_dimms(ctrl);
1977 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1978 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
1980 if (is_registered(ctrl)) {
/* Unbuffered topology only supports up to 3 DIMMs per controller here. */
1992 die("Too many unbuffered dimms");
1994 else if (dimms == 3) {
2003 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2004 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the dynamic idle cycle limit in DRAM Config High to the value
 * AMD recommends hardcoding (limit = 16 cycles, dynamic counter on). */
2007 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2010 /* AMD says to Hardcode this */
2011 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2012 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2013 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2014 dch |= DCH_DYN_IDLE_CTR_EN;
2015 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Apply all SPD-derived timings for the selected memory clock.  For each
 * enabled DIMM the per-DIMM update_dimm_* helpers are run; a helper
 * returning <= 0 sends control to the dimm_err path which disables that
 * DIMM (lines between 2042 and 2048 are elided from this listing).
 * Afterwards the DIMM-independent timings and config-high settings are
 * programmed.  Returns the surviving dimm_mask (return elided). */
2018 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2022 init_Tref(ctrl, param);
2023 for (i = 0; i < DIMM_SOCKETS; i++) {
/* Skip sockets that are empty or already disabled. */
2025 if (!(dimm_mask & (1 << i))) {
2028 /* DRAM Timing Low Register */
2029 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2030 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2031 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2032 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2033 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2034 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2036 /* DRAM Timing High Register */
2037 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2040 /* DRAM Config Low */
2041 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2042 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
/* dimm_err path: drop the offending DIMM and continue with the rest. */
2048 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2050 /* DRAM Timing Low Register */
2051 set_Twr(ctrl, param);
2053 /* DRAM Timing High Register */
2054 set_Twtr(ctrl, param);
2055 set_Trwt(ctrl, param);
2056 set_Twcl(ctrl, param);
2058 /* DRAM Config High */
2059 set_read_preamble(ctrl, param);
2060 set_max_async_latency(ctrl, param);
2061 set_idle_cycle_limit(ctrl, param);
/* Top-level per-controller SPD pass: detect DIMMs, reduce the mask
 * through each constraint stage (dual-channel pairing, sizing, buffering,
 * clock selection), then program all timings.  Each stage may shrink
 * dimm_mask; error checks between the calls are elided from this
 * listing.  The signature differs depending on whether sysinfo is
 * threaded through (CONFIG_RAMINIT_SYSINFO). */
2065 #if CONFIG_RAMINIT_SYSINFO
2066 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
2068 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2071 struct spd_set_memclk_result result;
2072 const struct mem_param *param;
/* Nothing to do on a node without a memory controller. */
2075 if (!controller_present(ctrl)) {
2076 // printk(BIOS_DEBUG, "No memory controller present\n");
2080 hw_enable_ecc(ctrl);
2081 activate_spd_rom(ctrl);
2082 dimm_mask = spd_detect_dimms(ctrl);
2083 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2084 printk(BIOS_DEBUG, "No memory for this cpu\n");
2087 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2090 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2093 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
2096 result = spd_set_memclk(ctrl, dimm_mask);
2097 param = result.param;
2098 dimm_mask = result.dimm_mask;
2101 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
/* hw_spd_err path: SPD bus failure is unrecoverable at this point. */
2107 /* Unrecoverable error reading SPD data */
2108 printk(BIOS_ERR, "SPD error - reset\n");
/* Hoist the DRAM that would sit under the PCI memory hole on node i up
 * above 4 GiB: shift the base/limit registers (F1 0x40/0x44 per node) of
 * all higher-numbered nodes and of node i's limit by the carried-over
 * amount, then program the DRAM Hole Address Register (F1 0xF0) with the
 * hole start and the hole offset.  Register values are address>>2 in K
 * units, hence the '<< 2' scaling.  Returns the carry (return elided). */
2113 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
2114 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2117 uint32_t carry_over;
2119 uint32_t base, limit;
/* Amount of memory (in K) displaced from below 4 GiB. */
2124 carry_over = (4*1024*1024) - hole_startk;
/* Shift every node above the hole upwards by carry_over. */
2126 for (ii=controllers - 1;ii>i;ii--) {
2127 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
/* Skip nodes whose DRAM range is not read+write enabled (bits 0,1). */
2128 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2131 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
/* Base/limit registers must be mirrored on every node's F1. */
2132 for (j = 0; j < controllers; j++) {
2133 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2134 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
/* Node i keeps its base but its limit grows by the hoisted amount. */
2137 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2138 for (j = 0; j < controllers; j++) {
2139 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2142 base = pci_read_config32(dev, 0x40 + (i << 3));
2143 basek = (base & 0xffff0000) >> 2;
2144 if (basek == hole_startk) {
2145 //don't need set memhole here, because hole off set will be 0, overflow
2146 //so need to change base reg instead, new basek will be 4*1024*1024
2148 base |= (4*1024*1024)<<2;
2149 for (j = 0; j < controllers; j++) {
2150 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
/* DRAM Hole Address Register: hole start, hole offset, enable bit
 * (enable-bit OR is elided from this listing). */
2154 hoist = /* hole start address */
2155 ((hole_startk << 10) & 0xff000000) +
2156 /* hole address to memory controller address */
2157 (((basek + carry_over) >> 6) & 0x0000ff00) +
2160 pci_write_config32(dev, 0xf0, hoist);
/* Configure the hardware memory hole below 4 GiB.  Computes the default
 * hole start (4 GiB minus the configured hole size, in K), optionally
 * nudges it off a node boundary, finds the node whose DRAM range spans
 * it, hoists that node's memory via hoist_memory(), and reprograms the
 * top-of-memory MSRs. */
2166 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2169 uint32_t hole_startk;
2172 hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK;
2174 printk(BIOS_SPEW, "Handling memory hole at 0x%08x (default)\n", hole_startk);
2175 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
2176 /* We need to double check if hole_startk is valid.
2177 * If it is equal to the dram base address in K (base_k),
2178 * we need to decrease it.
2181 for (i=0; i<controllers; i++) {
2184 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
/* Skip nodes whose DRAM range is not read+write enabled (bits 0,1). */
2185 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2188 base_k = (base & 0xffff0000) >> 2;
2189 if (base_k == hole_startk) {
2190 /* decrease memory hole startk to make sure it is
2191 * in the middle of the previous node
2193 hole_startk -= (base_k - basek_pri)>>1;
2194 break; /* only one hole */
2199 printk(BIOS_SPEW, "Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
2201 /* Find node number that needs the memory hole configured */
2202 for (i=0; i<controllers; i++) {
2203 uint32_t base, limit;
2204 unsigned base_k, limit_k;
2205 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2206 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2209 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
/* Registers hold address>>2 in K units; limit is inclusive, so round up. */
2210 base_k = (base & 0xffff0000) >> 2;
2211 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2212 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2214 hoist_memory(controllers, ctrl, hole_startk, i);
2215 end_k = memory_end_k(ctrl, controllers);
2216 set_top_mem(end_k, hole_startk);
2217 break; /* only one hole */
/* Upper bound on polls of DCL_DramInit before declaring failure. */
2225 #define TIMEOUT_LOOPS 300000
/* Bring the DRAM on all controllers out of reset and initialize it:
 * validate memory clocks, pulse memreset, kick off DramInit per node,
 * poll for completion, enable ECC/ChipKill reporting where configured,
 * wait for the memory-clear to finish on C0+ parts, and finally set up
 * the hardware memory hole on E0+ parts. */
2226 #if CONFIG_RAMINIT_SYSINFO
2227 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
2229 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2234 /* Error if I don't have memory */
2235 if (memory_end_k(ctrl, controllers) == 0) {
2239 /* Before enabling memory start the memory clocks */
2240 for (i = 0; i < controllers; i++) {
2242 if (!controller_present(ctrl + i))
/* Mark the clock valid only if at least one MEMCLK output is enabled. */
2244 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2245 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2246 dch |= DCH_MEMCLK_VALID;
2247 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2250 /* Disable dram receivers */
2252 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2253 dcl |= DCL_DisInRcvrs;
2254 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2258 /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
2259 /* And if necessary toggle the the reset on the dimms by hand */
2260 memreset(controllers, ctrl);
2262 for (i = 0; i < controllers; i++) {
2264 if (!controller_present(ctrl + i))
2266 /* Skip everything if I don't have any memory on this controller */
2267 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2268 if (!(dch & DCH_MEMCLK_VALID)) {
2272 /* Toggle DisDqsHys to get it working */
2273 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
/* With ECC DIMMs, also enable northbridge ECC reporting — and ChipKill
 * when running a 128-bit (dual-channel) interface. */
2274 if (dcl & DCL_DimmEccEn) {
2276 printk(BIOS_SPEW, "ECC enabled\n");
2277 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
2279 if (dcl & DCL_128BitEn) {
2280 mnc |= MNC_CHIPKILL_EN;
2282 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2284 dcl |= DCL_DisDqsHys;
2285 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2286 dcl &= ~DCL_DisDqsHys;
2287 dcl &= ~DCL_DLL_Disable;
/* Kick off hardware DRAM initialization on this node. */
2290 dcl |= DCL_DramInit;
2291 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
/* Second pass: wait for each node's initialization to complete. */
2294 for (i = 0; i < controllers; i++) {
2296 if (!controller_present(ctrl + i))
2298 /* Skip everything if I don't have any memory on this controller */
2299 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2300 if (!(dch & DCH_MEMCLK_VALID)) {
2304 printk(BIOS_DEBUG, "Initializing memory: ");
/* Poll until hardware clears DCL_DramInit, or give up after
 * TIMEOUT_LOOPS iterations. */
2307 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2309 if ((loops & 1023) == 0) {
2310 printk(BIOS_DEBUG, ".");
2312 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2313 if (loops >= TIMEOUT_LOOPS) {
2314 printk(BIOS_DEBUG, " failed\n");
2318 if (!is_cpu_pre_c0()) {
2319 /* Wait until it is safe to touch memory */
2320 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2321 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2323 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2324 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2327 printk(BIOS_DEBUG, " done\n");
2330 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
2331 // init hw mem hole here
2332 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2333 if (!is_cpu_pre_e0())
2334 set_hw_mem_hole(controllers, ctrl);
2337 //FIXME add enable node interleaving here -- yhlu
/* Plan for node-interleaving support (not implemented):
2339 1. check how many nodes we have , if not all has ram installed get out
2340 2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
2341 3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
2342 4. if all ready enable node_interleaving in f1 0x40..... of every node
2343 5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
*/
/* Record the given flag value in RAM for later boot stages.
 * NOTE(review): the entire body is elided from this listing — confirm
 * behavior against the full source. */
2348 static void set_sysinfo_in_ram(unsigned val)
2352 void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
2353 const uint16_t *spd_addr)
2357 struct mem_controller *ctrl;
2358 for (i=0;i<controllers; i++) {
2361 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2362 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2363 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2364 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2366 if (spd_addr == (void *)0) continue;
2368 for (j=0;j<DIMM_SOCKETS;j++) {
2369 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2370 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];