1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
7 #include <cpu/x86/cache.h>
8 #include <cpu/x86/mtrr.h>
14 #if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0
15 # error "CONFIG_RAMTOP must be a power of 2"
18 #ifndef QRANK_DIMM_SUPPORT
19 #define QRANK_DIMM_SUPPORT 0
/* Apply a table of (PCI address, AND-mask, OR-value) triples to the
 * hardware.  register_values holds consecutive triples; max is the total
 * number of array entries (3 * number of triples).  Each register is
 * read, masked with entry[i+1] (bits to preserve), OR-ed with entry[i+2]
 * (bits to set), then written back.
 */
static void setup_resource_map(const unsigned int *register_values, int max)
//	printk(BIOS_DEBUG, "setting up resource map....");
	/* Walk the table three words at a time: address, keep-mask, set-bits. */
	for (i = 0; i < max; i += 3) {
		/* Upper bits encode the PCI device; low 12 bits are the register offset. */
		dev = register_values[i] & ~0xfff;
		where = register_values[i] & 0xfff;
		/* Read-modify-write the config register. */
		reg = pci_read_config32(dev, where);
		reg &= register_values[i+1];
		reg |= register_values[i+2];
		pci_write_config32(dev, where, reg);
//	printk(BIOS_DEBUG, "done.\n");
/* Return nonzero when a memory controller answers at this node's
 * function 0 with vendor/device 0x1022/0x1100 (AMD K8 HyperTransport
 * configuration function), i.e. the node is populated. */
static int controller_present(const struct mem_controller *ctrl)
	return pci_read_config32(ctrl->f0, 0) == 0x11001022;
/* Program this node's DRAM controller registers to their initial
 * (pre-SPD-tuning) values from the register_values table below.
 * Each table entry is a (PCI address, AND-mask, OR-value) triple;
 * the table is replayed against this node by rebasing each address
 * onto ctrl->f0.  The function returns early (silently) when no
 * controller is present on the node.
 */
#if RAMINIT_SYSINFO==1
static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
static void sdram_set_registers(const struct mem_controller *ctrl)
	static const unsigned int register_values[] = {

	/* Careful set limit registers before base registers which
	   contain the enables */
	/* DRAM Limit i Registers
	 * [ 2: 0] Destination Node ID
	 * [10: 8] Interleave select
	 *         specifies the values of A[14:12] to use with interleave enable.
	 * [31:16] DRAM Limit Address i Bits 39-24
	 *         This field defines the upper address bits of a 40 bit address
	 *         that define the end of the DRAM region.
	 */
	PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
	PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
	PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
	PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
	PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
	PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
	PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
	/* DRAM Base i Registers
	 * [ 1: 1] Write Enable
	 *         0 = Writes Disabled
	 * [10: 8] Interleave Enable
	 *         000 = No interleave
	 *         001 = Interleave on A[12] (2 nodes)
	 *         011 = Interleave on A[12] and A[14] (4 nodes)
	 *         111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
	 * [31:16] DRAM Base Address i Bits 39-24
	 *         This field defines the upper address bits of a 40-bit address
	 *         that define the start of the DRAM region.
	 */
	PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,

	/* DRAM CS Base Address i Registers
	 * [ 0: 0] Chip-Select Bank Enable
	 * [15: 9] Base Address (19-13)
	 *         An optimization used when all DIMM are the same size...
	 * [31:21] Base Address (35-25)
	 *         This field defines the top 11 addresses bit of a 40-bit
	 *         address that define the memory address space.  These
	 *         bits decode 32-MByte blocks of memory.
	 */
	PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
	/* DRAM CS Mask Address i Registers
	 * Select bits to exclude from comparison with the DRAM Base address register.
	 * [15: 9] Address Mask (19-13)
	 *         Address to be excluded from the optimized case
	 * [29:21] Address Mask (33-25)
	 *         The bits with an address mask of 1 are excluded from address comparison
	 */
	PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
	/* DRAM Bank Address Mapping Register
	 * Specify the memory module size
	 * 000 = 32Mbyte  (Rows = 12 & Col = 8)
	 * 001 = 64Mbyte  (Rows = 12 & Col = 9)
	 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
	 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
	 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
	 * 101 = 1Gbyte   (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
	 * 110 = 2Gbyte   (Rows = 14 & Col = 12)
	 */
	PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
	/* DRAM Timing Low Register
	 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
	 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
	 *         0000 = 7 bus clocks
	 *         0001 = 8 bus clocks
	 *         1110 = 21 bus clocks
	 *         1111 = 22 bus clocks
	 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
	 *         0000 = 9 bus clocks
	 *         0001 = 10 bus clocks
	 *         1110 = 23 bus clocks
	 *         1111 = 24 bus clocks
	 * [14:12] Trcd (Ras#-active to Cas#-read/write Delay)
	 * [18:16] Trrd (Ras# to Ras# Delay)
	 * [23:20] Tras (Minimum Ras# Active Time)
	 *         0000 to 0100 = reserved
	 *         0101 = 5 bus clocks
	 *         1111 = 15 bus clocks
	 * [26:24] Trp (Row Precharge Time)
	 * [28:28] Twr (Write Recovery Time)
	 */
	PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
	/* DRAM Timing High Register
	 * [ 0: 0] Twtr (Write to Read Delay)
	 * [ 6: 4] Trwt (Read to Write Delay)
	 * [12: 8] Tref (Refresh Rate)
	 *         00000 = 100Mhz 4K rows
	 *         00001 = 133Mhz 4K rows
	 *         00010 = 166Mhz 4K rows
	 *         00011 = 200Mhz 4K rows
	 *         01000 = 100Mhz 8K/16K rows
	 *         01001 = 133Mhz 8K/16K rows
	 *         01010 = 166Mhz 8K/16K rows
	 *         01011 = 200Mhz 8K/16K rows
	 * [22:20] Twcl (Write CAS Latency)
	 *         000 = 1 Mem clock after CAS# (Unbuffered Dimms)
	 *         001 = 2 Mem clocks after CAS# (Registered Dimms)
	 */
	PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
	/* DRAM Config Low Register
	 * [ 0: 0] DLL Disable
	 * [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
	 *         0 = Enable DQS input filter
	 *         1 = Disable DQS input filtering
	 *         0 = Initialization done or not yet started.
	 *         1 = Initiate DRAM initialization sequence
	 * [ 9: 9] SO-Dimm Enable
	 *         1 = SO-Dimms present
	 *         0 = DRAM not enabled
	 *         1 = DRAM initialized and enabled
	 * [11:11] Memory Clear Status
	 *         0 = Memory Clear function has not completed
	 *         1 = Memory Clear function has completed
	 * [12:12] Exit Self-Refresh
	 *         0 = Exit from self-refresh done or not yet started
	 *         1 = DRAM exiting from self refresh
	 * [13:13] Self-Refresh Status
	 *         0 = Normal Operation
	 *         1 = Self-refresh mode active
	 * [15:14] Read/Write Queue Bypass Count
	 * [16:16] 128-bit/64-Bit
	 *         0 = 64bit Interface to DRAM
	 *         1 = 128bit Interface to DRAM
	 * [17:17] DIMM ECC Enable
	 *         0 = Some DIMMs do not have ECC
	 *         1 = ALL DIMMS have ECC bits
	 * [18:18] UnBuffered DIMMs
	 *         1 = Unbuffered DIMMS
	 * [19:19] Enable 32-Byte Granularity
	 *         0 = Optimize for 64byte bursts
	 *         1 = Optimize for 32byte bursts
	 * [20:20] DIMM 0 is x4
	 * [21:21] DIMM 1 is x4
	 * [22:22] DIMM 2 is x4
	 * [23:23] DIMM 3 is x4
	 *         1 = x4 DIMM present
	 * [24:24] Disable DRAM Receivers
	 *         0 = Receivers enabled
	 *         1 = Receivers disabled
	 *         000 = Arbiter's choice is always respected
	 *         001 = Oldest entry in DCQ can be bypassed 1 time
	 *         010 = Oldest entry in DCQ can be bypassed 2 times
	 *         011 = Oldest entry in DCQ can be bypassed 3 times
	 *         100 = Oldest entry in DCQ can be bypassed 4 times
	 *         101 = Oldest entry in DCQ can be bypassed 5 times
	 *         110 = Oldest entry in DCQ can be bypassed 6 times
	 *         111 = Oldest entry in DCQ can be bypassed 7 times
	 */
	PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
	(0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
	(1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
	(2 << 14)|(0 << 13)|(0 << 12)|
	(0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
	(0 << 3) |(0 << 1) |(0 << 0),
	/* DRAM Config High Register
	 * [ 0: 3] Maximum Asynchronous Latency
	 * [11: 8] Read Preamble
	 * [18:16] Idle Cycle Limit
	 * [19:19] Dynamic Idle Cycle Center Enable
	 *         0 = Use Idle Cycle Limit
	 *         1 = Generate a dynamic Idle cycle limit
	 * [22:20] DRAM MEMCLK Frequency
	 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
	 *         0 = Disable MemClks
	 * [26:26] Memory Clock 0 Enable
	 * [27:27] Memory Clock 1 Enable
	 * [28:28] Memory Clock 2 Enable
	 * [29:29] Memory Clock 3 Enable
	 */
	PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
	(0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
	(0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
	/* DRAM Delay Line Register
	 * Adjust the skew of the input DQS strobe relative to DATA
	 * [23:16] Delay Line Adjust
	 *         Adjusts the DLL derived PDL delay by one or more delay stages
	 *         in either the faster or slower direction.
	 * [24:24] Adjust Slower
	 *         1 = Adj is used to increase the PDL delay
	 * [25:25] Adjust Faster
	 *         1 = Adj is used to decrease the PDL delay
	 */
	PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
	/* MCA NB Status Low reg */
	PCI_ADDR(0, 0x18, 3, 0x48), 0x00f00000, 0x00000000,
	/* MCA NB Status high reg */
	PCI_ADDR(0, 0x18, 3, 0x4c), 0x01801e8c, 0x00000000,
	/* MCA NB address Low reg */
	PCI_ADDR(0, 0x18, 3, 0x50), 0x00000007, 0x00000000,
	/* MCA NB address high reg */
	PCI_ADDR(0, 0x18, 3, 0x54), 0xffffff00, 0x00000000,
	/* DRAM Scrub Control Register
	 * [ 4: 0] DRAM Scrub Rate
	 * [12: 8] L2 Scrub Rate
	 * [20:16] Dcache Scrub
	 *         00000 = Do not scrub
	 *         All Others = Reserved
	 */
	PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
	/* DRAM Scrub Address Low Register
	 * [ 0: 0] DRAM Scrubber Redirect Enable
	 *         1 = Scrubber Corrects errors found in normal operation
	 * [31: 6] DRAM Scrub Address 31-6
	 */
	PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
	/* DRAM Scrub Address High Register
	 * [ 7: 0] DRAM Scrub Address 39-32
	 */
	PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,

	/* Skip nodes whose memory controller does not respond. */
	if (!controller_present(ctrl)) {
//		printk(BIOS_DEBUG, "No memory controller present\n");
	printk(BIOS_SPEW, "setting up CPU%02x northbridge registers\n", ctrl->node_id);
	max = ARRAY_SIZE(register_values);
	/* Replay the table, rebasing each node-0 address onto this node's f0. */
	for (i = 0; i < max; i += 3) {
		dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
		where = register_values[i] & 0xfff;
		reg = pci_read_config32(dev, where);
		reg &= register_values[i+1];
		reg |= register_values[i+2];
		pci_write_config32(dev, where, reg);
	printk(BIOS_SPEW, "done.\n");
/* Enable DIMM ECC in DRAM Config Low when the northbridge reports ECC
 * capability (NBCAP_ECC), unless the CMOS option table explicitly
 * disables ECC memory.  Otherwise the ECC enable bit is cleared. */
static void hw_enable_ecc(const struct mem_controller *ctrl)
	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	/* Default to ECC off; turn it on only if the chip can do it. */
	dcl &= ~DCL_DimmEccEn;
	if (nbcap & NBCAP_ECC) {
		dcl |= DCL_DimmEccEn;
	/* CMOS option (default 1 = enabled) may veto ECC. */
	if (CONFIG_HAVE_OPTION_TABLE &&
	    read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
		dcl &= ~DCL_DimmEccEn;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Return nonzero when the controller is configured for a 128-bit
 * (dual channel) DRAM interface (DCL_128BitEn set in DRAM Config Low). */
static int is_dual_channel(const struct mem_controller *ctrl)
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	return dcl & DCL_128BitEn;
/* Return 1 when the northbridge is 128-bit (dual channel) capable.
 * Despite the name this does NOT uniquely identify an Opteron —
 * see the comment below; callers use it only as a dual-channel check. */
static int is_opteron(const struct mem_controller *ctrl)
	/* Test to see if I am an Opteron.  Socket 939 based Athlon64
	 * have dual channel capability, too, so we need a better test
	 * However, all code uses is_opteron() to find out whether to
	 * use dual channel, so if we really check for opteron here, we
	 * need to fix up all code using this function, too.
	 */
	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
	return !!(nbcap & NBCAP_128Bit);
/* Return nonzero when the controller is configured for registered
 * SDRAM (DCL_UnBuffDimm clear in DRAM Config Low). */
static int is_registered(const struct mem_controller *ctrl)
	/* Test to see if we are dealing with registered SDRAM.
	 * If we are not registered we are unbuffered.
	 * This function must be called after spd_handle_unbuffered_dimms.
	 */
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	return !(dcl & DCL_UnBuffDimm);
#if QRANK_DIMM_SUPPORT == 1
/* Read a DIMM's SPD EEPROM and compute the log2 size (in bits) of each
 * side.  On SPD read failure the hw_err path reports no memory; an
 * inconsistent SPD takes the val_err path and dies. */
static struct dimm_size spd_get_dimm_size(unsigned device)
	/* Calculate the log base 2 size of a DIMM in bits */
#if QRANK_DIMM_SUPPORT == 1
	/* Note it might be easier to use byte 31 here, it has the DIMM size as
	 * a multiple of 4MB.  The way we do it now we can size both
	 * sides of an asymmetric dimm.
	 */
	value = spd_read_byte(device, 3);	/* rows */
	if (value < 0) goto hw_err;
	if ((value & 0xf) == 0) goto val_err;
	sz.side1 += value & 0xf;
	sz.rows = value & 0xf;

	value = spd_read_byte(device, 4);	/* columns */
	if (value < 0) goto hw_err;
	if ((value & 0xf) == 0) goto val_err;
	sz.side1 += value & 0xf;
	sz.col = value & 0xf;

	value = spd_read_byte(device, 17);	/* banks */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;
	sz.side1 += log2(value & 0xff);

	/* Get the module data width and convert it to a power of two */
	value = spd_read_byte(device, 7);	/* (high byte) */
	if (value < 0) goto hw_err;
	low = spd_read_byte(device, 6);	/* (low byte) */
	if (low < 0) goto hw_err;
	value = value | (low & 0xff);
	/* Only 64-bit and 72-bit (ECC) wide modules are supported. */
	if ((value != 72) && (value != 64)) goto val_err;
	sz.side1 += log2(value);

	value = spd_read_byte(device, 5);	/* number of physical banks */
	if (value < 0) goto hw_err;
	if (value == 1) goto out;	/* single-sided: no side2 to size */
	if ((value != 2) && (value != 4 )) {
#if QRANK_DIMM_SUPPORT == 1
	/* Start with the symmetrical case */
	value = spd_read_byte(device, 3);	/* rows */
	if (value < 0) goto hw_err;
	/* High nibble of byte 3 is side-2 rows; zero means symmetric. */
	if ((value & 0xf0) == 0) goto out;	/* If symmetrical we are done */
	sz.side2 -= (value & 0x0f);		/* Subtract out rows on side 1 */
	sz.side2 += ((value >> 4) & 0x0f);	/* Add in rows on side 2 */

	value = spd_read_byte(device, 4);	/* columns */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;
	sz.side2 -= (value & 0x0f);		/* Subtract out columns on side 1 */
	sz.side2 += ((value >> 4) & 0x0f);	/* Add in columns on side 2 */
	die("Bad SPD value\n");
	/* If an hw_error occurs report that I have no memory */
#if QRANK_DIMM_SUPPORT == 1
/* Program the chip-select base registers for DIMM `index` from the
 * log2 sizes in `sz`, and enable the DIMM's memory clock(s).
 * Each DIMM owns a pair of CS base registers (one per side); with
 * QRANK support a quad-rank DIMM mirrors them into CS 4/5. */
static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
	uint32_t base0, base1;

	if (sz.side1 != sz.side2) {

	/* For each base register.
	 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
	 * The initial dimm size is in bits.
	 * Set the base enable bit0.
	 */

	/* Make certain side1 of the dimm is at least 32MB */
	if (sz.side1 >= (25 +3)) {
		/* size is log2(bits); 25+3 = log2(32MB in bits). Bit 0 = CS enable. */
		base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
	/* Make certain side2 of the dimm is at least 32MB */
	if (sz.side2 >= (25 + 3)) {
		base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;

	/* Double the size if we are using dual channel memory */
	if (is_dual_channel(ctrl)) {
		/* Shift size up one bit while preserving the enable bit. */
		base0 = (base0 << 1) | (base0 & 1);
		base1 = (base1 << 1) | (base1 & 1);

	/* Clear the reserved bits */
	base0 &= ~0x001ffffe;
	base1 &= ~0x001ffffe;

	/* Set the appropriate DIMM base address register */
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
#if QRANK_DIMM_SUPPORT == 1
	/* Quad-rank: mirror into the upper chip-select pair. */
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);

	/* Enable the memory clocks for this DIMM */
	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
	dch |= DCH_MEMCLK_EN0 << index;
#if QRANK_DIMM_SUPPORT == 1
	dch |= DCH_MEMCLK_EN0 << (index + 2);
	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the DRAM Bank Address Mapping nibble for DIMM `index`.
 * Pre-D0 CPUs encode the size directly; D0 and later use the
 * cs_map_aa row/column lookup table. */
static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
	/* Map (rows, cols) to the D0+ bank-address-map encoding;
	 * indexed as (rows - 12) * 5 + (col - 8). */
	static const unsigned cs_map_aa[] = {
	/* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
	map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
	/* Clear this DIMM's 4-bit field before inserting the new value. */
	map &= ~(0xf << (index * 4));
#if QRANK_DIMM_SUPPORT == 1
	map &= ~(0xf << ( (index + 2) * 4));

	/* Make certain side1 of the dimm is at least 32MB */
	if (sz.side1 >= (25 +3)) {
		if (is_cpu_pre_d0()) {
			map |= (sz.side1 - (25 + 3)) << (index *4);
#if QRANK_DIMM_SUPPORT == 1
			map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
			map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
#if QRANK_DIMM_SUPPORT == 1
			map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);

	pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/* Size every populated DIMM via SPD and program its chip-select base
 * and bank-address-map registers.  Returns the (possibly updated)
 * dimm_mask, or -1 on an SPD read error. */
static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
	for (i = 0; i < DIMM_SOCKETS; i++) {
		/* Skip sockets that were not detected as populated. */
		if (!(dimm_mask & (1 << i))) {
		sz = spd_get_dimm_size(ctrl->channel0[i]);
		return -1; /* Report SPD error */
		set_dimm_size(ctrl, sz, i);
		set_dimm_map (ctrl, sz, i);
/* Program the DRAM base/limit routing register pair for this node on
 * every node in the system (devices 0x18..0x1f, function 1), so all
 * CPUs route [base_k, limit_k) to ctrl->node_id. */
static void route_dram_accesses(const struct mem_controller *ctrl,
	unsigned long base_k, unsigned long limit_k)
	/* Route the addresses to the controller node */
	unsigned limit_reg, base_reg;

	node_id = ctrl->node_id;
	/* Each node owns an 8-byte base/limit register pair. */
	index = (node_id << 3);
	/* Convert kB to the register's 16MB-granular upper address bits. */
	limit = (limit_k << 2);
	limit |= ( 0 << 8) | (node_id << 0);	/* destination node in bits [2:0] */
	base = (base_k << 2);
	base |= (0 << 8) | (1<<1) | (1<<0);	/* write enable | read enable */

	limit_reg = 0x44 + index;
	base_reg = 0x40 + index;
	/* Write limit before base: base carries the enable bits. */
	for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
		pci_write_config32(device, limit_reg, limit);
		pci_write_config32(device, base_reg, base);
/* Program the TOP_MEM/TOP_MEM2 MSRs from the total memory size in kB.
 * Memory above 4GB goes into TOP_MEM2; below-4GB memory is clipped to
 * leave a 64MB MMIO hole (or the hardware memory hole start, when
 * configured) under 4GB for the ROM and other I/O devices. */
static void set_top_mem(unsigned tom_k, unsigned hole_startk)
	/* Error if I don't have memory */

	/* Report the amount of memory. */
	printk(BIOS_DEBUG, "RAM end at 0x%08x kB\n", tom_k);

	/* Now set top of memory */
	if (tom_k > (4*1024*1024)) {
		printk(BIOS_SPEW, "Handling memory mapped above 4 GB\n");
		printk(BIOS_SPEW, "Upper RAM end at 0x%08x kB\n", tom_k);
		/* Split the kB count into the MSR's lo/hi byte-address halves. */
		msr.lo = (tom_k & 0x003fffff) << 10;
		msr.hi = (tom_k & 0xffc00000) >> 22;
		wrmsr(TOP_MEM2, msr);
		printk(BIOS_SPEW, "Correcting memory amount mapped below 4 GB\n");

	/* Leave a 64M hole between TOP_MEM and TOP_MEM2
	 * so I can see my rom chip and other I/O devices.
	 */
	if (tom_k >= 0x003f0000) {
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
		if (hole_startk != 0) {
	printk(BIOS_SPEW, "Adjusting lower RAM end\n");
	printk(BIOS_SPEW, "Lower RAM end at 0x%08x kB\n", tom_k);
	msr.lo = (tom_k & 0x003fffff) << 10;
	msr.hi = (tom_k & 0xffc00000) >> 22;
/* Interleave this node's chip selects when they are all the same size
 * and their count is a power of two (2, 4 or 8).  Returns the node's
 * memory size in kB on success (0 on failure paths, handled by caller). */
static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
	/* csbase bit position (minus 4) to interleave on, indexed by
	 * cs_mode, for pre-D0 revisions. */
	static const uint8_t csbase_low_shift[] = {
	/* 128MB */ (14 - 4),
	/* 256MB */ (15 - 4),
	/* 512MB */ (15 - 4),
	/* Same table for D0 and later revisions (different encoding). */
	static const uint8_t csbase_low_d0_shift[] = {
	/* 128MB */ (14 - 4),
	/* 128MB */ (15 - 4),
	/* 256MB */ (15 - 4),
	/* 512MB */ (15 - 4),
	/* 256MB */ (16 - 4),
	/* 512MB */ (16 - 4),
	/* cs_base_high is not changed */

	int chip_selects, index;
	unsigned common_size;
	unsigned common_cs_mode;
	uint32_t csbase, csmask;

	/* See if all of the memory chip selects are the same size
	 * and if so count them.
	 */
	for (index = 0; index < 8; index++) {
		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
		if (common_size == 0) {
		/* The size differed fail */
		if (common_size != size) {
		value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
		/* Two chip selects share each 4-bit bank-map field. */
		cs_mode =( value >> ((index>>1)*4)) & 0xf;
		if (cs_mode == 0 ) continue;
		if (common_cs_mode == 0) {
			common_cs_mode = cs_mode;
		/* The cs_mode differed fail */
		if (common_cs_mode != cs_mode) {

	/* Chip selects can only be interleaved when there is
	 * more than one and there is a power of two of them.
	 */
	bits = log2(chip_selects);
	if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {

	/* Find the bits of csbase that we need to interleave on */
	if (is_cpu_pre_d0()){
		csbase_inc = 1 << csbase_low_shift[common_cs_mode];
		if (is_dual_channel(ctrl)) {
			/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
			if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
//				printk(BIOS_DEBUG, "8 4GB chip selects cannot be interleaved\n");
		csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
		if (is_dual_channel(ctrl)) {
			if ( (bits==3) && (common_cs_mode > 8)) {
//				printk(BIOS_DEBUG, "8 cs_mode>8 chip selects cannot be interleaved\n");

	/* Compute the initial values for csbase and csmask.
	 * In csbase just set the enable bit and the base to zero.
	 * In csmask set the mask bits for the size and page level interleave.
	 */
	csmask = (((common_size << bits) - 1) << 21);
	csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
	for (index = 0; index < 8; index++) {
		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
		pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
		/* Each enabled chip select gets the next interleave slot. */
		csbase += csbase_inc;

	printk(BIOS_SPEW, "Interleaved\n");

	/* Return the memory size in K */
	return common_size << (15 + bits);
/* Lay out this node's enabled chip selects contiguously from address 0,
 * largest first, rewriting each CS base/mask pair.  Returns the node's
 * total memory size in kB. */
static unsigned long order_chip_selects(const struct mem_controller *ctrl)
	/* Remember which registers we have used in the high 8 bits of tom */
	/* Find the largest remaining candidate */
	unsigned index, candidate;
	uint32_t csbase, csmask;
	for (index = 0; index < 8; index++) {
		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
		/* Is it enabled? */
		/* Is it greater? */
		if (value <= csbase) {
		/* Has it already been selected */
		if (tom & (1 << (index + 24))) {
		/* I have a new candidate */
	/* See if I have found a new candidate */

	/* Remember the dimm size */
	size = csbase >> 21;	/* CS base holds size in 32MB units at bit 21 */
	/* Remember I have used this register */
	tom |= (1 << (candidate + 24));
	/* Recompute the cs base register value */
	csbase = (tom << 21) | 1;	/* bit 0 = chip-select enable */
	/* Increment the top of memory */
	/* Compute the memory mask */
	csmask = ((size -1) << 21);
	csmask |= 0xfe00;		/* For now don't optimize */
	/* Write the new base register */
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (candidate << 2), csbase);
	/* Write the new mask register */
	pci_write_config32(ctrl->f2, DRAM_CSMASK + (candidate << 2), csmask);

	/* Return the memory size in K */
	/* tom counts 32MB units (low 24 bits); << 15 converts to kB. */
	return (tom & ~0xff000000) << 15;
/* Return the highest memory address (in kB) already assigned to nodes
 * 0 .. max_node_id-1, by scanning their DRAM base/limit routing
 * registers on function 1. */
static unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
	/* Find the last memory address used */
	for (node_id = 0; node_id < max_node_id; node_id++) {
		uint32_t limit, base;
		index = node_id << 3;	/* 8 bytes of base/limit per node */
		base = pci_read_config32(ctrl->f1, 0x40 + index);
		/* Only look at the limit if the base is enabled */
		/* bits 0/1 are the read/write enables. */
		if ((base & 3) == 3) {
			limit = pci_read_config32(ctrl->f1, 0x44 + index);
			/* Limit is inclusive in 16MB units; round up and convert to kB. */
			end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
/* Arrange this node's chip selects (interleaved when the CMOS option
 * allows and the hardware supports it, otherwise ordered by size),
 * then route the resulting address range and set top-of-memory. */
static void order_dimms(const struct mem_controller *ctrl)
	unsigned long tom_k, base_k;

	/* Interleaving defaults to enabled (option default 1). */
	if ((!CONFIG_HAVE_OPTION_TABLE) ||
	    read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
		tom_k = interleave_chip_selects(ctrl);
		printk(BIOS_DEBUG, "Interleaving disabled\n");
		tom_k = order_chip_selects(ctrl);

	/* Compute the memory base address */
	/* This node's memory starts where the previous nodes' memory ends. */
	base_k = memory_end_k(ctrl, ctrl->node_id);
	route_dram_accesses(ctrl, base_k, tom_k);
	set_top_mem(tom_k, 0);
/* Disable DIMM `index`: clear both of its chip-select base registers
 * (dropping the enable bits) and remove it from dimm_mask.
 * Returns the updated mask. */
static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
	printk(BIOS_DEBUG, "disabling dimm %02x\n", index);
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
	dimm_mask &= ~(1 << index);
/* Classify the populated DIMMs as registered or unbuffered from SPD
 * byte 21 and program DCL_UnBuffDimm accordingly.  Mixing the two
 * types is fatal.  On dual-channel-capable post-D0 parts, socket-939
 * style upper chip-select mapping is also enabled. */
static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
	int has_dualch = is_opteron(ctrl);
	for (i = 0; (i < DIMM_SOCKETS); i++) {
		if (!(dimm_mask & (1 << i))) {
		/* SPD byte 21 = SDRAM module attributes. */
		value = spd_read_byte(ctrl->channel0[i], 21);
		/* Registered dimm ? */
		if (value & (1 << 1)) {
		/* Otherwise it must be an unbuffered dimm */

	if (unbuffered && registered) {
		die("Mixed buffered and registered dimms not supported");

	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	dcl &= ~DCL_UnBuffDimm;
	if ((has_dualch) && (!is_cpu_pre_d0())) {
		dcl |= DCL_UnBuffDimm; /* set DCL_DualDIMMen too? */
		/* set DCL_En2T if you have non-equal DDR mem types! */

		/* cpuid family/model check for socket-939 class parts —
		 * NOTE(review): exact model meaning assumed from the comment
		 * below; verify against the CPU revision guide. */
		if ((cpuid_eax(1) & 0x30) == 0x30) {
			/* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
			dcl |= DCL_UpperCSMap;
		dcl |= DCL_UnBuffDimm;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);

	if (is_registered(ctrl)) {
		printk(BIOS_SPEW, "Registered\n");
		printk(BIOS_SPEW, "Unbuffered\n");
/* Probe the SPD EEPROMs on both channels and return a bitmask of
 * populated sockets: bit i = channel 0 socket i, bit i+DIMM_SOCKETS =
 * channel 1 socket i.  SPD byte 2 is the memory type. */
static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
	for (i = 0; i < DIMM_SOCKETS; i++) {
		device = ctrl->channel0[i];
		byte = spd_read_byte(ctrl->channel0[i], 2);	/* Type */
		dimm_mask |= (1 << i);
		device = ctrl->channel1[i];
		byte = spd_read_byte(ctrl->channel1[i], 2);
		dimm_mask |= (1 << (i + DIMM_SOCKETS));
/* Enable 128-bit (dual channel) mode when every populated DIMM pair
 * has identical SPD data for all timing-relevant bytes and the CPU is
 * 128-bit capable; otherwise fall back to single channel.  Returns the
 * dimm_mask with the channel-1 bits cleared on success. */
static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
	/* SPD addresses to verify are identical */
	static const uint8_t addresses[] = {
		2,	/* Type should be DDR SDRAM */
		3,	/* *Row addresses */
		4,	/* *Column addresses */
		5,	/* *Physical Banks */
		6,	/* *Module Data Width low */
		7,	/* *Module Data Width high */
		9,	/* *Cycle time at highest CAS Latency CL=X */
		11,	/* *SDRAM Type */
		13,	/* *SDRAM Width */
		17,	/* *Logical Banks */
		18,	/* *Supported CAS Latencies */
		21,	/* *SDRAM Module Attributes */
		23,	/* *Cycle time at CAS Latency (CLX - 0.5) */
		26,	/* *Cycle time at CAS Latency (CLX - 1.0) */
		27,	/* *tRP Row precharge time */
		28,	/* *Minimum Row Active to Row Active Delay (tRRD) */
		29,	/* *tRCD RAS to CAS */
		30,	/* *tRAS Activate to Precharge */
		41,	/* *Minimum Active to Active/Auto Refresh Time(Trc) */
		42,	/* *Minimum Auto Refresh Command Time(Trfc) */
	/* If the dimms are not in pairs do not do dual channels */
	if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
		((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
		goto single_channel;
	/* If the cpu is not capable of doing dual channels don't do dual channels */
	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
	if (!(nbcap & NBCAP_128Bit)) {
		goto single_channel;
	for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
		unsigned device0, device1;
		/* If I don't have a dimm skip this one */
		if (!(dimm_mask & (1 << i))) {
		device0 = ctrl->channel0[i];
		device1 = ctrl->channel1[i];
		/* Compare every listed SPD byte between the paired DIMMs. */
		for (j = 0; j < ARRAY_SIZE(addresses); j++) {
			addr = addresses[j];
			value0 = spd_read_byte(device0, addr);
			value1 = spd_read_byte(device1, addr);
			if (value0 != value1) {
				goto single_channel;
	printk(BIOS_SPEW, "Enabling dual channel memory\n");
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	dcl &= ~DCL_32ByteEn;	/* 64-byte bursts in 128-bit mode */
	dcl |= DCL_128BitEn;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
	/* Channel 1 DIMMs are now part of the 128-bit pairs; drop their bits. */
	dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
	/* Per-memory-speed parameter set (see the speed[] table in
	 * get_mem_param below). */
	uint8_t divisor;	/* In 1/2 ns increments */
	uint32_t dch_memclk;	/* DRAM Config High MEMCLK frequency field value */
	uint16_t dch_tref4k, dch_tref8k;	/* Tref encodings for 4K and 8K/16K row parts */
	uint8_t dtl_trwt[3][3];	/* first index is CAS_LAT 2/2.5/3 and 128/registered64/64 */
	uint8_t rdpreamble[4];	/* 0 is for registered, 1 for 1-2 DIMMS, 2 and 3 for 3 or 4 unreg dimm slots */
/* Return the mem_param entry for the fastest memory speed whose cycle
 * time is not shorter than min_cycle_time (SPD 0xXY fixed-point ns
 * format).  Dies when min_cycle_time is faster than the fastest table
 * entry supports. */
static const struct mem_param *get_mem_param(unsigned min_cycle_time)
	/* Parameter sets ordered fastest-last; terminated by a
	 * zero cycle_time sentinel entry. */
	static const struct mem_param speed[] = {
		/* 100 MHz (10 ns) entry */
		.divisor = (10 <<1),
		.dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
		.dch_tref4k = DTH_TREF_100MHZ_4K,
		.dch_tref8k = DTH_TREF_100MHZ_8K,
		.dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
		/* 133 MHz (7.5 ns) entry */
		.divisor = (7<<1)+1,
		.dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
		.dch_tref4k = DTH_TREF_133MHZ_4K,
		.dch_tref8k = DTH_TREF_133MHZ_8K,
		.dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
		/* 166 MHz (6 ns) entry */
		.dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
		.dch_tref4k = DTH_TREF_166MHZ_4K,
		.dch_tref8k = DTH_TREF_166MHZ_8K,
		.dtl_trwt = { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
		.rdpreamble = { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
		/* 200 MHz (5 ns) entry */
		.dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
		.dch_tref4k = DTH_TREF_200MHZ_4K,
		.dch_tref8k = DTH_TREF_200MHZ_8K,
		.dtl_trwt = { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
		.rdpreamble = { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
	const struct mem_param *param;
	/* Stop at the last entry still slow enough for min_cycle_time. */
	for (param = &speed[0]; param->cycle_time ; param++) {
		if (min_cycle_time > (param+1)->cycle_time) {
	if (!param->cycle_time) {
		die("min_cycle_time to low");
	printk(BIOS_SPEW, "%s\n", param->name);
/* Result pair returned by spd_set_memclk: the chosen speed parameters
 * plus the (possibly reduced) dimm mask. */
struct spd_set_memclk_result {
	const struct mem_param *param;
/*
 * spd_set_memclk() - select the fastest memory clock / CAS latency that the
 * northbridge (NORTHBRIDGE_CAP), the CMOS "max_mem_clock" option and every
 * populated DIMM's SPD data can all support.  DIMMs that cannot run the
 * selected settings are disabled via disable_dimm().  The chosen memclk is
 * programmed into DRAM_CONFIG_HIGH and the CAS latency into DRAM_TIMING_LOW.
 * On an unrecoverable SPD read error the result carries a NULL param and
 * dimm_mask == -1 (hw_error path at the bottom).
 * NOTE(review): this excerpt is line-sampled; several declarations and
 * braces are not visible.  Comments describe only the visible logic.
 */
1388 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
1390 /* Compute the minimum cycle time for these dimms */
1391 struct spd_set_memclk_result result;
1392 unsigned min_cycle_time, min_latency, bios_cycle_time;
/* SPD bytes holding the min cycle time at CL X, CL X-0.5, CL X-1. */
1396 static const uint8_t latency_indicies[] = { 26, 23, 9 };
/* Cycle times in SPD encoding: high nibble = ns, low nibble = tenths. */
1397 static const unsigned char min_cycle_times[] = {
1398 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1399 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1400 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1401 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
/* Floor: the fastest clock the northbridge itself reports capable of. */
1404 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1406 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1407 bios_cycle_time = min_cycle_times[
1408 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
/* A user-configured (slower) max clock overrides the hardware capability. */
1409 if (CONFIG_HAVE_OPTION_TABLE && bios_cycle_time > min_cycle_time) {
1410 min_cycle_time = bios_cycle_time;
1414 /* Compute the least latency with the fastest clock supported
1415 * by both the memory controller and the dimms.
1417 for (i = 0; i < DIMM_SOCKETS; i++) {
1418 int new_cycle_time, new_latency;
1423 if (!(dimm_mask & (1 << i))) {
1427 /* First find the supported CAS latencies
1428 * Byte 18 for DDR SDRAM is interpreted:
1429 * bit 0 == CAS Latency = 1.0
1430 * bit 1 == CAS Latency = 1.5
1431 * bit 2 == CAS Latency = 2.0
1432 * bit 3 == CAS Latency = 2.5
1433 * bit 4 == CAS Latency = 3.0
1434 * bit 5 == CAS Latency = 3.5
/* Start at the slowest supported clock (10ns) and improve from there. */
1438 new_cycle_time = 0xa0;
1441 latencies = spd_read_byte(ctrl->channel0[i], 18);
1442 if (latencies <= 0) continue;
1444 /* Compute the lowest cas latency supported */
1445 latency = log2(latencies) -2;
1447 /* Loop through and find a fast clock with a low latency */
1448 for (index = 0; index < 3; index++, latency++) {
/* Only CL 2, 2.5 and 3 (encoded 2..4) are usable by this controller. */
1450 if ((latency < 2) || (latency > 4) ||
1451 (!(latencies & (1 << latency)))) {
1454 spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1455 if (spd_value < 0) {
1459 /* Only increase the latency if we decreas the clock */
1460 if ((spd_value >= min_cycle_time) && (spd_value < new_cycle_time)) {
1461 new_cycle_time = spd_value;
1462 new_latency = latency;
1465 if (new_latency > 4){
1468 /* Does min_latency need to be increased? */
1469 if (new_cycle_time > min_cycle_time) {
1470 min_cycle_time = new_cycle_time;
1472 /* Does min_cycle_time need to be increased? */
1473 if (new_latency > min_latency) {
1474 min_latency = new_latency;
1477 /* Make a second pass through the dimms and disable
1478 * any that cannot support the selected memclk and cas latency.
1481 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1486 if (!(dimm_mask & (1 << i))) {
1490 latencies = spd_read_byte(ctrl->channel0[i], 18);
1491 if (latencies < 0) goto hw_error;
1492 if (latencies == 0) {
1496 /* Compute the lowest cas latency supported */
1497 latency = log2(latencies) -2;
1499 /* Walk through searching for the selected latency */
1500 for (index = 0; index < 3; index++, latency++) {
1501 if (!(latencies & (1 << latency))) {
1504 if (latency == min_latency)
1507 /* If I can't find the latency or my index is bad error */
1508 if ((latency != min_latency) || (index >= 3)) {
1512 /* Read the min_cycle_time for this latency */
1513 spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1514 if (spd_value < 0) goto hw_error;
1516 /* All is good if the selected clock speed
1517 * is what I need or slower.
1519 if (spd_value <= min_cycle_time) {
1522 /* Otherwise I have an error, disable the dimm */
1524 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
1527 //down speed for full load 4 rank support
1528 #if QRANK_DIMM_SUPPORT
/* All four sockets populated (both channels): check rank counts (SPD
 * byte 5) and cap DDR400 (0x50) down to DDR333 (0x60) under full load. */
1529 if (dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
1531 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1533 if (!(dimm_mask & (1 << i))) {
1536 val = spd_read_byte(ctrl->channel0[i], 5);
1543 if (min_cycle_time <= 0x50 ) {
1544 min_cycle_time = 0x60;
1551 /* Now that I know the minimum cycle time lookup the memory parameters */
1552 result.param = get_mem_param(min_cycle_time);
1554 /* Update DRAM Config High with our selected memory speed */
1555 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1556 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1558 /* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
1559 if (!is_cpu_pre_e0()) {
1560 if (min_cycle_time==0x50) {
1566 value |= result.param->dch_memclk;
1567 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
/* DTL encoding for CL 2 / 2.5 / 3, indexed by (min_latency - 2). */
1569 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1571 /* Update DRAM Timing Low with our selected cas latency */
1572 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1573 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1574 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1575 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1577 result.dimm_mask = dimm_mask;
/* hw_error exit: signal unrecoverable SPD failure to the caller. */
1580 result.param = (const struct mem_param *)0;
1581 result.dimm_mask = -1;
/* Convert DIMM i's SPD byte 41 (tRC -- presumably in ns; TODO confirm
 * against the fallback param->tRC used for 0/0xff values, not visible here)
 * to memory clocks, clamp to [DTL_TRC_MIN, DTL_TRC_MAX], and widen the Trc
 * field of DRAM_TIMING_LOW if this DIMM needs more clocks than currently
 * programmed (the field is only ever increased).  Returns -1 on SPD read
 * error.  NOTE(review): excerpt is line-sampled; some lines are missing. */
1586 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1588 unsigned clocks, old_clocks;
1591 value = spd_read_byte(ctrl->channel0[i], 41);
1592 if (value < 0) return -1;
1593 if ((value == 0) || (value == 0xff)) {
/* ceil(2*value / divisor): round the time up to whole clocks. */
1596 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1597 if (clocks < DTL_TRC_MIN) {
1598 clocks = DTL_TRC_MIN;
1600 if (clocks > DTL_TRC_MAX) {
1604 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1605 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
/* Keep the larger of the programmed value and this DIMM's requirement. */
1606 if (old_clocks > clocks) {
1607 clocks = old_clocks;
1609 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1610 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1611 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Same pattern as update_dimm_Trc, but for tRFC (SPD byte 42, falling back
 * to param->tRFC when the byte is 0 or 0xff): convert to clocks, clamp to
 * [DTL_TRFC_MIN, DTL_TRFC_MAX], and only ever widen the Trfc field of
 * DRAM_TIMING_LOW.  Returns -1 on SPD read error.
 * NOTE(review): excerpt is line-sampled; some lines are missing. */
1615 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1617 unsigned clocks, old_clocks;
1620 value = spd_read_byte(ctrl->channel0[i], 42);
1621 if (value < 0) return -1;
1622 if ((value == 0) || (value == 0xff)) {
1623 value = param->tRFC;
/* ceil(2*value / divisor): round the time up to whole clocks. */
1625 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1626 if (clocks < DTL_TRFC_MIN) {
1627 clocks = DTL_TRFC_MIN;
1629 if (clocks > DTL_TRFC_MAX) {
1632 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1633 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
/* Keep the larger of the programmed value and this DIMM's requirement. */
1634 if (old_clocks > clocks) {
1635 clocks = old_clocks;
1637 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1638 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1639 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Widen the Trcd field of DRAM_TIMING_LOW from DIMM i's SPD byte 29
 * (tRCD -- presumably in quarter-ns units, hence the divisor*2 below;
 * TODO confirm).  Clamped to [DTL_TRCD_MIN, DTL_TRCD_MAX]; only ever
 * increased.  Returns -1 on SPD read error. */
1644 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1646 unsigned clocks, old_clocks;
1649 value = spd_read_byte(ctrl->channel0[i], 29);
1650 if (value < 0) return -1;
/* ceil(value / (2*divisor)): round up to whole clocks. */
1651 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1652 if (clocks < DTL_TRCD_MIN) {
1653 clocks = DTL_TRCD_MIN;
1655 if (clocks > DTL_TRCD_MAX) {
1658 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1659 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
/* Keep the larger of the programmed value and this DIMM's requirement. */
1660 if (old_clocks > clocks) {
1661 clocks = old_clocks;
1663 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1664 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1665 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Widen the Trrd field of DRAM_TIMING_LOW from DIMM i's SPD byte 28
 * (tRRD -- presumably quarter-ns units, matching the divisor*2 math;
 * TODO confirm).  Clamped to [DTL_TRRD_MIN, DTL_TRRD_MAX]; only ever
 * increased.  Returns -1 on SPD read error. */
1669 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1671 unsigned clocks, old_clocks;
1674 value = spd_read_byte(ctrl->channel0[i], 28);
1675 if (value < 0) return -1;
/* ceil(value / (2*divisor)): round up to whole clocks. */
1676 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1677 if (clocks < DTL_TRRD_MIN) {
1678 clocks = DTL_TRRD_MIN;
1680 if (clocks > DTL_TRRD_MAX) {
1683 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1684 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
/* Keep the larger of the programmed value and this DIMM's requirement. */
1685 if (old_clocks > clocks) {
1686 clocks = old_clocks;
1688 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1689 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1690 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Widen the Tras field of DRAM_TIMING_LOW from DIMM i's SPD byte 30
 * (tRAS -- presumably whole ns, hence value*2/divisor; TODO confirm).
 * Clamped to [DTL_TRAS_MIN, DTL_TRAS_MAX]; only ever increased.
 * Returns -1 on SPD read error. */
1694 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1696 unsigned clocks, old_clocks;
1699 value = spd_read_byte(ctrl->channel0[i], 30);
1700 if (value < 0) return -1;
/* ceil(2*value / divisor): round the time up to whole clocks. */
1701 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1702 if (clocks < DTL_TRAS_MIN) {
1703 clocks = DTL_TRAS_MIN;
1705 if (clocks > DTL_TRAS_MAX) {
1708 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1709 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
/* Keep the larger of the programmed value and this DIMM's requirement. */
1710 if (old_clocks > clocks) {
1711 clocks = old_clocks;
1713 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1714 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1715 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Widen the Trp field of DRAM_TIMING_LOW from DIMM i's SPD byte 27
 * (tRP -- presumably quarter-ns units, matching the divisor*2 math;
 * TODO confirm).  Clamped to [DTL_TRP_MIN, DTL_TRP_MAX]; only ever
 * increased.  Returns -1 on SPD read error. */
1719 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1721 unsigned clocks, old_clocks;
1724 value = spd_read_byte(ctrl->channel0[i], 27);
1725 if (value < 0) return -1;
/* ceil(value / (2*divisor)): round up to whole clocks. */
1726 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1727 if (clocks < DTL_TRP_MIN) {
1728 clocks = DTL_TRP_MIN;
1730 if (clocks > DTL_TRP_MAX) {
1733 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1734 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
/* Keep the larger of the programmed value and this DIMM's requirement. */
1735 if (old_clocks > clocks) {
1736 clocks = old_clocks;
1738 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1739 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1740 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program the write-recovery time (Twr) field of DRAM_TIMING_LOW directly
 * from the selected parameter table (param->dtl_twr); no per-DIMM SPD
 * input is involved. */
1744 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1747 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1748 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1749 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1750 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Initialize the refresh-rate (Tref) field of DRAM_TIMING_HIGH to the
 * parameter table's 4K-row value; update_dimm_Tref() may later switch it
 * to the 8K value based on SPD data. */
1754 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1757 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1758 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1759 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1760 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Choose between the 4K- and 8K-row refresh rates in DRAM_TIMING_HIGH based
 * on DIMM i's SPD byte 3 (row-address bits -- presumably; the selecting
 * condition between L862 and L863 is not visible in this excerpt).  A DIMM
 * with value 12 keeps an already-programmed 4K rate, otherwise the slower
 * 8K rate wins.  Returns -1 on SPD read error. */
1763 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1767 unsigned tref, old_tref;
1768 value = spd_read_byte(ctrl->channel0[i], 3);
1769 if (value < 0) return -1;
1772 tref = param->dch_tref8k;
1774 tref = param->dch_tref4k;
1777 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1778 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
/* Never upgrade an already-programmed 8K rate back to 4K. */
1779 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1780 tref = param->dch_tref4k;
1782 tref = param->dch_tref8k;
1784 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1785 dth |= (tref << DTH_TREF_SHIFT);
1786 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Flag DIMM i as x4-organized in DRAM_CONFIG_LOW based on SPD byte 13
 * (primary SDRAM width -- presumably; the comparison against the read
 * value is not visible in this excerpt).  With QRANK_DIMM_SUPPORT, SPD
 * byte 5 (number of physical banks/ranks) additionally sets the paired
 * x4 bit at offset i+2.  Return value/error handling lines are outside
 * the visible excerpt. */
1791 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1795 #if QRANK_DIMM_SUPPORT == 1
1799 value = spd_read_byte(ctrl->channel0[i], 13);
1804 #if QRANK_DIMM_SUPPORT == 1
1805 rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
1811 dimm = 1<<(DCL_x4DIMM_SHIFT+i);
1812 #if QRANK_DIMM_SUPPORT == 1
1814 dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
1817 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1822 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Clear DCL_DimmEccEn in DRAM_CONFIG_LOW when DIMM i's SPD byte 11
 * (DIMM configuration type -- presumably; the test on the read value is
 * not visible in this excerpt) shows it cannot do ECC, so one non-ECC
 * DIMM disables ECC for the whole controller. */
1826 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1830 value = spd_read_byte(ctrl->channel0[i], 11);
1835 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1836 dcl &= ~DCL_DimmEccEn;
1837 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count populated DIMMs by scanning every second chip-select base register
 * (DRAM_CSBASE + index*4 for index 0,2,4,6); the enable test and counter
 * increment are outside this excerpt. */
1842 static int count_dimms(const struct mem_controller *ctrl)
1847 for (index = 0; index < 8; index += 2) {
1849 csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
/* Program the write-to-read delay (Twtr) field of DRAM_TIMING_HIGH from
 * the selected parameter table (param->dtl_twtr). */
1857 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1861 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1862 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1863 dth |= ((param->dtl_twtr - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1864 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the read-to-write delay (Trwt) field of DRAM_TIMING_HIGH.  The
 * value comes from a 2-D lookup param->dtl_trwt[lat][mtype], where lat is
 * derived from the programmed CAS latency in DRAM_TIMING_LOW (mapping
 * lines not visible in this excerpt) and mtype classifies the memory
 * interface: 0 = dual channel (Opteron), 1 = registered 64-bit,
 * 2 = unbuffered 64-bit.  die()s on values outside the valid range. */
1867 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
1875 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1876 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1878 if (is_opteron(ctrl)) {
1879 mtype = 0; /* dual channel */
1880 } else if (is_registered(ctrl)) {
1881 mtype = 1; /* registered 64bit interface */
1883 mtype = 2; /* unbuffered 64bit interface */
1897 die("Unknown LAT for Trwt");
1900 clocks = param->dtl_trwt[lat][mtype];
1901 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
1902 die("Unknown Trwt\n");
1905 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1906 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
1907 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
1908 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the write CAS latency (Twcl) field of DRAM_TIMING_HIGH; the
 * clock count depends on whether the DIMMs are registered (the actual
 * values assigned in each branch are outside this excerpt). */
1912 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
1914 /* Memory Clocks after CAS# */
1917 if (is_registered(ctrl)) {
1922 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1923 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
1924 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
1925 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the read preamble field of DRAM_CONFIG_HIGH.  The preamble is
 * looked up in param->rdpreamble[] with an index chosen from the DIMM
 * loading: registered DIMMs get one entry, unbuffered configurations pick
 * an entry by occupied slot count (<3, ==3, ==4); die()s on an impossible
 * slot count or an out-of-range preamble value. */
1929 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
1932 unsigned rdpreamble;
/* Count occupied slots on channel 0 (increment line not visible). */
1937 for (i = 0; i < 4; i++) {
1938 if (ctrl->channel0[i]) {
1943 /* map to index to param.rdpreamble array */
1944 if (is_registered(ctrl)) {
1946 } else if (slots < 3) {
1948 } else if (slots == 3) {
1950 } else if (slots == 4) {
1953 die("Unknown rdpreamble for this nr of slots");
1956 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1957 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
1958 rdpreamble = param->rdpreamble[i];
1960 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
1961 die("Unknown rdpreamble");
1964 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
1965 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the maximum asynchronous latency field of DRAM_CONFIG_HIGH.
 * The latency is chosen from the installed DIMM count (count_dimms()) and
 * whether the DIMMs are registered; die()s when too many unbuffered DIMMs
 * are installed.  The per-case async_lat assignments are outside this
 * excerpt. */
1968 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
1974 dimms = count_dimms(ctrl);
1976 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1977 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
1979 if (is_registered(ctrl)) {
1991 die("Too many unbuffered dimms");
1993 else if (dimms == 3) {
2002 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2003 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Hard-code the dynamic idle cycle limit in DRAM_CONFIG_HIGH to 16 cycles
 * and enable the dynamic idle counter, per AMD recommendation (param is
 * unused here). */
2006 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2009 /* AMD says to Hardcode this */
2010 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2011 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2012 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2013 dch |= DCH_DYN_IDLE_CTR_EN;
2014 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Apply all per-DIMM SPD-derived timings for one controller.  For each
 * DIMM still in dimm_mask, run the update_dimm_* helpers (DRAM Timing
 * Low, Timing High, Config Low); any helper returning <= 0 jumps to
 * dimm_err, which disables the DIMM (error-vs-disable distinction in the
 * dimm_err prologue is outside this excerpt).  Afterwards apply the
 * DIMM-independent settings (Twr, Twtr, Trwt, Twcl, read preamble, async
 * latency, idle limit) and return the surviving dimm_mask (return line
 * not visible). */
2017 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
2021 init_Tref(ctrl, param);
2022 for (i = 0; i < DIMM_SOCKETS; i++) {
2024 if (!(dimm_mask & (1 << i))) {
2027 /* DRAM Timing Low Register */
2028 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2029 if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
2030 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2031 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2032 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2033 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2035 /* DRAM Timing High Register */
2036 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2039 /* DRAM Config Low */
2040 if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
2041 if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
/* dimm_err: a DIMM failed one of the updates -- drop it from the mask. */
2047 dimm_mask = disable_dimm(ctrl, i, dimm_mask);
2049 /* DRAM Timing Low Register */
2050 set_Twr(ctrl, param);
2052 /* DRAM Timing High Register */
2053 set_Twtr(ctrl, param);
2054 set_Trwt(ctrl, param);
2055 set_Twcl(ctrl, param);
2057 /* DRAM Config High */
2058 set_read_preamble(ctrl, param);
2059 set_max_async_latency(ctrl, param);
2060 set_idle_cycle_limit(ctrl, param);
/* Top-level per-controller SPD initialization: detect DIMMs, enable dual
 * channel, size the RAM, handle unbuffered DIMMs, pick the memory clock
 * (spd_set_memclk) and apply all timings (spd_set_dram_timing).  Bails out
 * early when the controller or all memory is absent; the trailing path
 * reports an unrecoverable SPD error (the reset/halt after the printk is
 * outside this excerpt).  Signature differs with RAMINIT_SYSINFO. */
2064 #if RAMINIT_SYSINFO==1
2065 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
2067 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2070 struct spd_set_memclk_result result;
2071 const struct mem_param *param;
2074 if (!controller_present(ctrl)) {
2075 //		printk(BIOS_DEBUG, "No memory controller present\n");
2079 hw_enable_ecc(ctrl);
2080 activate_spd_rom(ctrl);
2081 dimm_mask = spd_detect_dimms(ctrl);
2082 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2083 printk(BIOS_DEBUG, "No memory for this cpu\n");
2086 dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
2089 dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
2092 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
/* result.dimm_mask == -1 signals an SPD hard error (see spd_set_memclk). */
2095 result = spd_set_memclk(ctrl, dimm_mask);
2096 param = result.param;
2097 dimm_mask = result.dimm_mask;
2100 dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
2106 /* Unrecoverable error reading SPD data */
2107 printk(BIOS_ERR, "SPD error - reset\n");
/* Hoist the DRAM that would sit under the PCI memory hole above 4GB for
 * node i.  All higher nodes' DRAM base/limit registers (F1 0x40/0x44 per
 * node, mirrored to every controller) are shifted up by carry_over =
 * 4GB - hole_startk; node i's limit is raised likewise.  If the node's
 * base coincides exactly with the hole start, the base register itself is
 * moved to 4GB (a zero hole offset would overflow); otherwise the DRAM
 * Hole Address Register (F1 0xf0) is programmed with the hole start and
 * the controller-address offset.  Returns a value not visible in this
 * excerpt (presumably the carried-over size -- confirm against caller). */
2112 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
2113 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2116 uint32_t carry_over;
2118 uint32_t base, limit;
/* Amount of address space displaced by the hole, in K (4GB - hole start). */
2123 carry_over = (4*1024*1024) - hole_startk;
2125 for (ii=controllers - 1;ii>i;ii--) {
2126 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
/* Skip nodes whose DRAM base is not marked read/write enabled. */
2127 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2130 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
2131 for (j = 0; j < controllers; j++) {
2132 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
2133 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
2136 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2137 for (j = 0; j < controllers; j++) {
2138 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
2141 base = pci_read_config32(dev, 0x40 + (i << 3));
2142 basek = (base & 0xffff0000) >> 2;
2143 if (basek == hole_startk) {
2144 //don't need set memhole here, because hole off set will be 0, overflow
2145 //so need to change base reg instead, new basek will be 4*1024*1024
2147 base |= (4*1024*1024)<<2;
2148 for (j = 0; j < controllers; j++) {
2149 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
2153 hoist = /* hole start address */
2154 ((hole_startk << 10) & 0xff000000) +
2155 /* hole address to memory controller address */
2156 (((basek + carry_over) >> 6) & 0x0000ff00) +
2159 pci_write_config32(dev, 0xf0, hoist);
/* Configure the hardware memory hole below 4GB.  The hole starts at
 * 4GB - CONFIG_HW_MEM_HOLE_SIZEK.  With HW_MEM_HOLE_SIZE_AUTO_INC the
 * start is nudged down into the middle of the previous node if it lands
 * exactly on a node's DRAM base.  The node whose [base, limit) range
 * contains the hole start is then hoisted (hoist_memory) and TOP_MEM /
 * TOP_MEM2 are reprogrammed via set_top_mem().  Only one hole is ever
 * created. */
2165 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2168 uint32_t hole_startk;
2171 hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK;
2173 printk(BIOS_SPEW, "Handling memory hole at 0x%08x (default)\n", hole_startk);
2174 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
2175 /* We need to double check if hole_startk is valid.
2176 * If it is equal to the dram base address in K (base_k),
2177 * we need to decrease it.
2180 for (i=0; i<controllers; i++) {
2183 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
/* Skip nodes whose DRAM base is not marked read/write enabled. */
2184 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2187 base_k = (base & 0xffff0000) >> 2;
2188 if (base_k == hole_startk) {
2189 /* decrease memory hole startk to make sure it is
2190 * in the middle of the previous node
2192 hole_startk -= (base_k - basek_pri)>>1;
2193 break; /* only one hole */
2198 printk(BIOS_SPEW, "Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
2200 /* Find node number that needs the memory hole configured */
2201 for (i=0; i<controllers; i++) {
2202 uint32_t base, limit;
2203 unsigned base_k, limit_k;
2204 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2205 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2208 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2209 base_k = (base & 0xffff0000) >> 2;
2210 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2211 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2213 hoist_memory(controllers, ctrl, hole_startk, i);
2214 end_k = memory_end_k(ctrl, controllers);
2215 set_top_mem(end_k, hole_startk);
2216 break; /* only one hole */
/* Bring up DRAM on all controllers after the registers are programmed:
 *  1. validate that some memory exists (memory_end_k);
 *  2. start the memory clocks (DCH_MEMCLK_VALID) and disable the DRAM
 *     receivers on every present controller;
 *  3. memreset() to toggle DIMM reset / satisfy the 20-MEMCLK wait;
 *  4. per controller: enable ECC/ChipKill in MCA_NB_CONFIG when the DIMMs
 *     support it, pulse DisDqsHys, and kick DramInit;
 *  5. poll DramInit completion up to TIMEOUT_LOOPS (progress dots), and on
 *     C0+ parts additionally wait for MemClrStatus/DramEnable before
 *     memory may be touched;
 *  6. on E0+ parts set up the hardware memory hole (DramHoleValid is only
 *     legal after MemClrStatus is set by hardware).
 * NOTE(review): excerpt is line-sampled; loop bodies and braces are
 * partially missing. */
2224 #define TIMEOUT_LOOPS 300000
2225 #if RAMINIT_SYSINFO == 1
2226 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
2228 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2233 /* Error if I don't have memory */
2234 if (memory_end_k(ctrl, controllers) == 0) {
2238 /* Before enabling memory start the memory clocks */
2239 for (i = 0; i < controllers; i++) {
2241 if (!controller_present(ctrl + i))
2243 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2244 if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
2245 dch |= DCH_MEMCLK_VALID;
2246 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2249 /* Disable dram receivers */
2251 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2252 dcl |= DCL_DisInRcvrs;
2253 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2257 /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
2258 /* And if necessary toggle the the reset on the dimms by hand */
2259 memreset(controllers, ctrl);
2261 for (i = 0; i < controllers; i++) {
2263 if (!controller_present(ctrl + i))
2265 /* Skip everything if I don't have any memory on this controller */
2266 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2267 if (!(dch & DCH_MEMCLK_VALID)) {
2271 /* Toggle DisDqsHys to get it working */
2272 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2273 if (dcl & DCL_DimmEccEn) {
2275 printk(BIOS_SPEW, "ECC enabled\n");
2276 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
/* ChipKill additionally requires 128-bit (dual-channel) mode. */
2278 if (dcl & DCL_128BitEn) {
2279 mnc |= MNC_CHIPKILL_EN;
2281 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2283 dcl |= DCL_DisDqsHys;
2284 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2285 dcl &= ~DCL_DisDqsHys;
2286 dcl &= ~DCL_DLL_Disable;
2289 dcl |= DCL_DramInit;
2290 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2293 for (i = 0; i < controllers; i++) {
2295 if (!controller_present(ctrl + i))
2297 /* Skip everything if I don't have any memory on this controller */
2298 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2299 if (!(dch & DCH_MEMCLK_VALID)) {
2303 printk(BIOS_DEBUG, "Initializing memory: ");
/* Poll DramInit until hardware clears it, or TIMEOUT_LOOPS expires. */
2306 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2308 if ((loops & 1023) == 0) {
2309 printk(BIOS_DEBUG, ".");
2311 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2312 if (loops >= TIMEOUT_LOOPS) {
2313 printk(BIOS_DEBUG, " failed\n");
2317 if (!is_cpu_pre_c0()) {
2318 /* Wait until it is safe to touch memory */
2319 dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
2320 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2322 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2323 } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
2326 printk(BIOS_DEBUG, " done\n");
2329 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
2330 // init hw mem hole here
2331 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2332 if (!is_cpu_pre_e0())
2333 set_hw_mem_hole(controllers, ctrl);
2336 //FIXME add enable node interleaving here -- yhlu
2338 1. check how many nodes we have , if not all has ram installed get out
2339 2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
2340 3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
2341 4. if all ready enable node_interleaving in f1 0x40..... of every node
2342 5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
/* Record a flag/value in RAM-resident sysinfo state -- the body is not
 * visible in this excerpt, so the exact destination is unconfirmed. */
2347 static void set_sysinfo_in_ram(unsigned val)
2351 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
2352 const uint16_t *spd_addr)
2356 struct mem_controller *ctrl;
2357 for (i=0;i<controllers; i++) {
2360 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2361 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2362 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2363 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2365 if (spd_addr == (void *)0) continue;
2367 for (j=0;j<DIMM_SOCKETS;j++) {
2368 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2369 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];