1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
7 #include <cpu/x86/cache.h>
8 #include <cpu/x86/mtrr.h>
13 #if CONFIG_HAVE_OPTION_TABLE
14 #include "option_table.h"
17 #if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0
18 # error "CONFIG_RAMTOP must be a power of 2"
/* Apply a table of PCI config-space register updates.
 * register_values is consumed in triples: { PCI address, AND mask, OR bits }.
 * Each register is read-modified-written as reg = (reg & mask) | bits.
 * max is the total number of uints in the table (a multiple of 3).
 * NOTE(review): this listing is elided (embedded line numbers jump);
 * local declarations and closing braces are missing from this view.
 */
21 void setup_resource_map(const unsigned int *register_values, int max)
24 // printk(BIOS_DEBUG, "setting up resource map....");
25 for (i = 0; i < max; i += 3) {
29 dev = register_values[i] & ~0xfff; /* upper bits encode bus/device/function */
30 where = register_values[i] & 0xfff; /* low 12 bits are the config register offset */
31 reg = pci_read_config32(dev, where);
32 reg &= register_values[i+1]; /* clear the bits not preserved by the mask */
33 reg |= register_values[i+2]; /* set the requested bits */
34 pci_write_config32(dev, where, reg);
36 // printk(BIOS_DEBUG, "done.\n");
/* Return non-zero when a K8 memory controller answers at this node's
 * function 0: 0x1022 is the AMD PCI vendor ID, 0x1100 the K8 HT
 * configuration device ID read back from config register 0. */
39 static int controller_present(const struct mem_controller *ctrl)
41 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
/* Program the per-node K8 DRAM controller registers to their initial,
 * pre-SPD-tuning values.  The static register_values[] table is consumed
 * in triples { PCI address, keep-mask, set-bits } exactly as in
 * setup_resource_map(), except the device part is rebased from node 0
 * (PCI_DEV(0, 0x18, 0)) onto this controller's ctrl->f0.
 * The two prototypes differ only in the optional sysinfo parameter,
 * selected by CONFIG_RAMINIT_SYSINFO. */
44 #if CONFIG_RAMINIT_SYSINFO
45 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
47 static void sdram_set_registers(const struct mem_controller *ctrl)
50 static const unsigned int register_values[] = {
52 /* Careful set limit registers before base registers which
53 contain the enables */
54 /* DRAM Limit i Registers
63 * [ 2: 0] Destination Node ID
73 * [10: 8] Interleave select
74 * specifies the values of A[14:12] to use with interleave enable.
76 * [31:16] DRAM Limit Address i Bits 39-24
77 * This field defines the upper address bits of a 40 bit address
78 * that define the end of the DRAM region.
80 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
81 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
82 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
83 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
84 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
85 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
86 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
87 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
88 /* DRAM Base i Registers
100 * [ 1: 1] Write Enable
101 * 0 = Writes Disabled
104 * [10: 8] Interleave Enable
105 * 000 = No interleave
106 * 001 = Interleave on A[12] (2 nodes)
108 * 011 = Interleave on A[12] and A[14] (4 nodes)
112 * 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
114 * [31:16] DRAM Base Address i Bits 39-24
115 * This field defines the upper address bits of a 40-bit address
116 * that define the start of the DRAM region.
118 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
119 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
120 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
121 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
122 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
123 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
124 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
125 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
127 /* DRAM CS Base Address i Registers
136 * [ 0: 0] Chip-Select Bank Enable
140 * [15: 9] Base Address (19-13)
141 * An optimization used when all DIMM are the same size...
143 * [31:21] Base Address (35-25)
144 * This field defines the top 11 addresses bit of a 40-bit
145 * address that define the memory address space. These
146 * bits decode 32-MByte blocks of memory.
148 PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
149 PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
150 PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
151 PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
152 PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
153 PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
154 PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
155 PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
156 /* DRAM CS Mask Address i Registers
165 * Select bits to exclude from comparison with the DRAM Base address register.
167 * [15: 9] Address Mask (19-13)
168 * Address to be excluded from the optimized case
170 * [29:21] Address Mask (33-25)
171 * The bits with an address mask of 1 are excluded from address comparison
175 PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
176 PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
177 PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
178 PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
179 PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
180 PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
181 PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
182 PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
183 /* DRAM Bank Address Mapping Register
185 * Specify the memory module size
190 * 000 = 32Mbyte (Rows = 12 & Col = 8)
191 * 001 = 64Mbyte (Rows = 12 & Col = 9)
192 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
193 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
194 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
195 * 101 = 1Gbyte (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
196 * 110 = 2Gbyte (Rows = 14 & Col = 12)
203 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
204 /* DRAM Timing Low Register
206 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
216 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
217 * 0000 = 7 bus clocks
218 * 0001 = 8 bus clocks
220 * 1110 = 21 bus clocks
221 * 1111 = 22 bus clocks
222 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
223 * 0000 = 9 bus clocks
224 * 0010 = 10 bus clocks
226 * 1110 = 23 bus clocks
227 * 1111 = 24 bus clocks
228 * [14:12] Trcd (Ras#-active to Cas#-read/write Delay)
238 * [18:16] Trrd (Ras# to Ras# Delay)
248 * [23:20] Tras (Minimum Ras# Active Time)
249 * 0000 to 0100 = reserved
250 * 0101 = 5 bus clocks
252 * 1111 = 15 bus clocks
253 * [26:24] Trp (Row Precharge Time)
263 * [28:28] Twr (Write Recovery Time)
268 PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
269 /* DRAM Timing High Register
271 * [ 0: 0] Twtr (Write to Read Delay)
275 * [ 6: 4] Trwt (Read to Write Delay)
285 * [12: 8] Tref (Refresh Rate)
286 * 00000 = 100Mhz 4K rows
287 * 00001 = 133Mhz 4K rows
288 * 00010 = 166Mhz 4K rows
289 * 00011 = 200Mhz 4K rows
290 * 01000 = 100Mhz 8K/16K rows
291 * 01001 = 133Mhz 8K/16K rows
292 * 01010 = 166Mhz 8K/16K rows
293 * 01011 = 200Mhz 8K/16K rows
295 * [22:20] Twcl (Write CAS Latency)
296 * 000 = 1 Mem clock after CAS# (Unbuffered Dimms)
297 * 001 = 2 Mem clocks after CAS# (Registered Dimms)
300 PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
301 /* DRAM Config Low Register
303 * [ 0: 0] DLL Disable
312 * [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
313 * 0 = Enable DQS input filter
314 * 1 = Disable DQS input filtering
317 * 0 = Initialization done or not yet started.
318 * 1 = Initiate DRAM initialization sequence
319 * [ 9: 9] SO-Dimm Enable
321 * 1 = SO-Dimms present
323 * 0 = DRAM not enabled
324 * 1 = DRAM initialized and enabled
325 * [11:11] Memory Clear Status
326 * 0 = Memory Clear function has not completed
327 * 1 = Memory Clear function has completed
328 * [12:12] Exit Self-Refresh
329 * 0 = Exit from self-refresh done or not yet started
330 * 1 = DRAM exiting from self refresh
331 * [13:13] Self-Refresh Status
332 * 0 = Normal Operation
333 * 1 = Self-refresh mode active
334 * [15:14] Read/Write Queue Bypass Count
339 * [16:16] 128-bit/64-Bit
340 * 0 = 64bit Interface to DRAM
341 * 1 = 128bit Interface to DRAM
342 * [17:17] DIMM ECC Enable
343 * 0 = Some DIMMs do not have ECC
344 * 1 = ALL DIMMS have ECC bits
345 * [18:18] UnBuffered DIMMs
347 * 1 = Unbuffered DIMMS
348 * [19:19] Enable 32-Byte Granularity
349 * 0 = Optimize for 64byte bursts
350 * 1 = Optimize for 32byte bursts
351 * [20:20] DIMM 0 is x4
352 * [21:21] DIMM 1 is x4
353 * [22:22] DIMM 2 is x4
354 * [23:23] DIMM 3 is x4
356 * 1 = x4 DIMM present
357 * [24:24] Disable DRAM Receivers
358 * 0 = Receivers enabled
359 * 1 = Receivers disabled
361 * 000 = Arbiters choice is always respected
362 * 001 = Oldest entry in DCQ can be bypassed 1 time
363 * 010 = Oldest entry in DCQ can be bypassed 2 times
364 * 011 = Oldest entry in DCQ can be bypassed 3 times
365 * 100 = Oldest entry in DCQ can be bypassed 4 times
366 * 101 = Oldest entry in DCQ can be bypassed 5 times
367 * 110 = Oldest entry in DCQ can be bypassed 6 times
368 * 111 = Oldest entry in DCQ can be bypassed 7 times
371 PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
373 (0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
374 (1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
375 (2 << 14)|(0 << 13)|(0 << 12)|
376 (0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
377 (0 << 3) |(0 << 1) |(0 << 0),
378 /* DRAM Config High Register
380 * [ 0: 3] Maximum Asynchronous Latency
385 * [11: 8] Read Preamble
403 * [18:16] Idle Cycle Limit
412 * [19:19] Dynamic Idle Cycle Center Enable
413 * 0 = Use Idle Cycle Limit
414 * 1 = Generate a dynamic Idle cycle limit
415 * [22:20] DRAM MEMCLK Frequency
425 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
426 * 0 = Disable MemClks
428 * [26:26] Memory Clock 0 Enable
431 * [27:27] Memory Clock 1 Enable
434 * [28:28] Memory Clock 2 Enable
437 * [29:29] Memory Clock 3 Enable
442 PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
443 (0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
444 (0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
445 /* DRAM Delay Line Register
447 * Adjust the skew of the input DQS strobe relative to DATA
449 * [23:16] Delay Line Adjust
450 * Adjusts the DLL derived PDL delay by one or more delay stages
451 * in either the faster or slower direction.
452 * [24:24] Adjust Slower
454 * 1 = Adj is used to increase the PDL delay
455 * [25:25] Adjust Faster
457 * 1 = Adj is used to decrease the PDL delay
460 PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
461 /* MCA NB Status Low reg */
462 PCI_ADDR(0, 0x18, 3, 0x48), 0x00f00000, 0x00000000,
463 /* MCA NB Status high reg */
464 PCI_ADDR(0, 0x18, 3, 0x4c), 0x01801e8c, 0x00000000,
465 /* MCA NB address Low reg */
466 PCI_ADDR(0, 0x18, 3, 0x50), 0x00000007, 0x00000000,
467 /* MCA NB address high reg */
468 PCI_ADDR(0, 0x18, 3, 0x54), 0xffffff00, 0x00000000,
469 /* DRAM Scrub Control Register
471 * [ 4: 0] DRAM Scrub Rate
473 * [12: 8] L2 Scrub Rate
475 * [20:16] Dcache Scrub
478 * 00000 = Do not scrub
500 * All Others = Reserved
502 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
503 /* DRAM Scrub Address Low Register
505 * [ 0: 0] DRAM Scrubber Redirect Enable
507 * 1 = Scrubber Corrects errors found in normal operation
509 * [31: 6] DRAM Scrub Address 31-6
511 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
512 /* DRAM Scrub Address High Register
514 * [ 7: 0] DRAM Scrub Address 39-32
517 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
/* Skip nodes whose memory controller does not decode (e.g. empty socket). */
522 if (!controller_present(ctrl)) {
523 // printk(BIOS_DEBUG, "No memory controller present\n");
526 printk(BIOS_SPEW, "setting up CPU%02x northbridge registers\n", ctrl->node_id);
527 max = ARRAY_SIZE(register_values);
528 for (i = 0; i < max; i += 3) {
/* Rebase the table's node-0 device onto this node's f0 device. */
532 dev = (register_values[i] & ~0xfff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
533 where = register_values[i] & 0xfff;
534 reg = pci_read_config32(dev, where);
535 reg &= register_values[i+1];
536 reg |= register_values[i+2];
537 pci_write_config32(dev, where, reg);
539 printk(BIOS_SPEW, "done.\n");
/* Enable DIMM ECC when the northbridge reports the capability
 * (NBCAP_ECC) and the CMOS/option-table setting (default 1) does not
 * veto it; otherwise leave/force DCL_DimmEccEn cleared. */
542 static void hw_enable_ecc(const struct mem_controller *ctrl)
545 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
546 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
547 dcl &= ~DCL_DimmEccEn; /* start from "ECC off" */
548 if (nbcap & NBCAP_ECC) {
549 dcl |= DCL_DimmEccEn;
551 if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
552 dcl &= ~DCL_DimmEccEn; /* user disabled ECC in setup */
554 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Non-zero when the DRAM controller is configured for the 128-bit
 * (dual channel) interface (DCL_128BitEn in DRAM Config Low). */
557 static int is_dual_channel(const struct mem_controller *ctrl)
560 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
561 return dcl & DCL_128BitEn;
/* Non-zero when the northbridge advertises 128-bit (dual channel)
 * capability.  Despite the name this does NOT uniquely identify an
 * Opteron — see the inline caveat about socket-939 Athlon64. */
564 static int is_opteron(const struct mem_controller *ctrl)
566 /* Test to see if I am an Opteron. Socket 939 based Athlon64
567 * have dual channel capability, too, so we need a better test
569 * However, all code uses is_opteron() to find out whether to
570 * use dual channel, so if we really check for opteron here, we
571 * need to fix up all code using this function, too.
574 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
575 return !!(nbcap & NBCAP_128Bit);
/* Non-zero when the controller was configured for registered DIMMs
 * (DCL_UnBuffDimm clear).  Only valid after
 * spd_handle_unbuffered_dimms() has programmed DRAM Config Low. */
578 static int is_registered(const struct mem_controller *ctrl)
580 /* Test to see if we are dealing with registered SDRAM.
581 * If we are not registered we are unbuffered.
582 * This function must be called after spd_handle_unbuffered_dimms.
585 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
586 return !(dcl & DCL_UnBuffDimm);
594 #if CONFIG_QRANK_DIMM_SUPPORT
/* Read a DIMM's SPD EEPROM and compute the log2 size (in bits) of each
 * side.  side1/side2 are accumulated as log2(rows) + log2(cols) +
 * log2(logical banks) + log2(data width).  On an SPD read failure the
 * hw_err path reports zero memory; an invalid SPD value calls die(). */
599 static struct dimm_size spd_get_dimm_size(unsigned device)
601 /* Calculate the log base 2 size of a DIMM in bits */
608 #if CONFIG_QRANK_DIMM_SUPPORT
612 /* Note it might be easier to use byte 31 here, it has the DIMM size as
613 * a multiple of 4MB. The way we do it now we can size both
614 * sides of an asymmetric dimm.
616 value = spd_read_byte(device, 3); /* rows */
617 if (value < 0) goto hw_err;
618 if ((value & 0xf) == 0) goto val_err;
619 sz.side1 += value & 0xf;
620 sz.rows = value & 0xf;
622 value = spd_read_byte(device, 4); /* columns */
623 if (value < 0) goto hw_err;
624 if ((value & 0xf) == 0) goto val_err;
625 sz.side1 += value & 0xf;
626 sz.col = value & 0xf;
628 value = spd_read_byte(device, 17); /* banks */
629 if (value < 0) goto hw_err;
630 if ((value & 0xff) == 0) goto val_err;
631 sz.side1 += log2(value & 0xff);
633 /* Get the module data width and convert it to a power of two */
634 value = spd_read_byte(device, 7); /* (high byte) */
635 if (value < 0) goto hw_err;
639 low = spd_read_byte(device, 6); /* (low byte) */
640 if (low < 0) goto hw_err;
641 value = value | (low & 0xff);
642 if ((value != 72) && (value != 64)) goto val_err; /* 72 = 64-bit + ECC */
643 sz.side1 += log2(value);
646 value = spd_read_byte(device, 5); /* number of physical banks */
647 if (value < 0) goto hw_err;
648 if (value == 1) goto out; /* single-sided: side2 stays empty */
649 if ((value != 2) && (value != 4 )) {
652 #if CONFIG_QRANK_DIMM_SUPPORT
656 /* Start with the symmetrical case */
659 value = spd_read_byte(device, 3); /* rows */
660 if (value < 0) goto hw_err;
661 if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
662 sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
663 sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */
665 value = spd_read_byte(device, 4); /* columns */
666 if (value < 0) goto hw_err;
667 if ((value & 0xff) == 0) goto val_err;
668 sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
669 sz.side2 += ((value >> 4) & 0x0f); /* Add in columns on side 2 */
674 die("Bad SPD value\n");
675 /* If an hw_error occurs report that I have no memory */
681 #if CONFIG_QRANK_DIMM_SUPPORT
/* Program the two (or four, with quad-rank support) chip-select base
 * registers for DIMM `index` from its computed log2 size, and turn on
 * that DIMM's memory clock in DRAM Config High.
 * sz.side1/side2 are log2 sizes in bits; 25+3 = log2(32MB in bits). */
689 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
691 uint32_t base0, base1;
694 if (sz.side1 != sz.side2) {
698 /* For each base register.
699 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
700 * The initialize dimm size is in bits.
701 * Set the base enable bit0.
706 /* Make certain side1 of the dimm is at least 32MB */
707 if (sz.side1 >= (25 +3)) {
708 base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
711 /* Make certain side2 of the dimm is at least 32MB */
712 if (sz.side2 >= (25 + 3)) {
713 base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
716 /* Double the size if we are using dual channel memory */
717 if (is_dual_channel(ctrl)) {
718 base0 = (base0 << 1) | (base0 & 1); /* shift size, keep enable bit */
719 base1 = (base1 << 1) | (base1 & 1);
722 /* Clear the reserved bits */
723 base0 &= ~0x001ffffe;
724 base1 &= ~0x001ffffe;
726 /* Set the appropriate DIMM base address register */
727 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
728 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
729 #if CONFIG_QRANK_DIMM_SUPPORT
731 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
732 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
736 /* Enable the memory clocks for this DIMM */
738 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
739 dch |= DCH_MEMCLK_EN0 << index;
740 #if CONFIG_QRANK_DIMM_SUPPORT
742 dch |= DCH_MEMCLK_EN0 << (index + 2);
745 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Fill in this DIMM's 4-bit field of the DRAM Bank Address Mapping
 * register.  Pre-D0 CPUs encode the raw log2-size delta; D0 and later
 * use the cs_map_aa[] (rows,cols)->mode lookup table. */
749 static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
751 static const unsigned cs_map_aa[] = {
752 /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
760 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
761 map &= ~(0xf << (index * 4)); /* clear this DIMM's nibble first */
762 #if CONFIG_QRANK_DIMM_SUPPORT
764 map &= ~(0xf << ( (index + 2) * 4));
769 /* Make certain side1 of the dimm is at least 32MB */
770 if (sz.side1 >= (25 +3)) {
771 if (is_cpu_pre_d0()) {
772 map |= (sz.side1 - (25 + 3)) << (index *4);
773 #if CONFIG_QRANK_DIMM_SUPPORT
775 map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
780 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
781 #if CONFIG_QRANK_DIMM_SUPPORT
783 map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
789 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/* Size every populated DIMM (per dimm_mask) via SPD and program its
 * chip-select base and bank-mapping registers.
 * Returns the (possibly updated) dimm_mask, or -1 on an SPD error. */
793 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
797 for (i = 0; i < DIMM_SOCKETS; i++) {
799 if (!(dimm_mask & (1 << i))) {
802 sz = spd_get_dimm_size(ctrl->channel0[i]);
804 return -1; /* Report SPD error */
806 set_dimm_size(ctrl, sz, i);
807 set_dimm_map (ctrl, sz, i);
/* Program the DRAM base/limit routing pair for this controller's node
 * (register pair 0x40/0x44 + node_id*8, function 1) on every node in
 * the system, so all CPUs route [base_k, limit_k) to this node. */
812 static void route_dram_accesses(const struct mem_controller *ctrl,
813 unsigned long base_k, unsigned long limit_k)
815 /* Route the addresses to the controller node */
820 unsigned limit_reg, base_reg;
823 node_id = ctrl->node_id;
824 index = (node_id << 3); /* 8 bytes of config space per node pair */
825 limit = (limit_k << 2); /* kB -> [31:16] limit field format */
828 limit |= ( 0 << 8) | (node_id << 0); /* no interleave, destination node */
829 base = (base_k << 2);
831 base |= (0 << 8) | (1<<1) | (1<<0); /* write enable | read enable */
833 limit_reg = 0x44 + index;
834 base_reg = 0x40 + index;
835 for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
836 pci_write_config32(device, limit_reg, limit); /* limit first: it holds no enable bits */
837 pci_write_config32(device, base_reg, base);
/* Program the TOP_MEM / TOP_MEM2 MSRs from the amount of RAM found.
 * tom_k is the top of memory in kB; memory above 4GB goes in TOP_MEM2,
 * and the sub-4GB top is pulled down to leave MMIO space for the ROM
 * and other devices (optionally at hole_startk with the HW memory
 * hole feature). */
841 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
843 /* Error if I don't have memory */
848 /* Report the amount of memory. */
849 printk(BIOS_DEBUG, "RAM end at 0x%08x kB\n", tom_k);
851 /* Now set top of memory */
853 if (tom_k > (4*1024*1024)) {
854 printk(BIOS_SPEW, "Handling memory mapped above 4 GB\n");
855 printk(BIOS_SPEW, "Upper RAM end at 0x%08x kB\n", tom_k);
856 msr.lo = (tom_k & 0x003fffff) << 10; /* kB -> bytes, low 32 bits */
857 msr.hi = (tom_k & 0xffc00000) >> 22; /* kB -> bytes, high 32 bits */
858 wrmsr(TOP_MEM2, msr);
859 printk(BIOS_SPEW, "Correcting memory amount mapped below 4 GB\n");
862 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
863 * so I can see my rom chip and other I/O devices.
865 if (tom_k >= 0x003f0000) {
866 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
867 if (hole_startk != 0) {
872 printk(BIOS_SPEW, "Adjusting lower RAM end\n");
874 printk(BIOS_SPEW, "Lower RAM end at 0x%08x kB\n", tom_k);
875 msr.lo = (tom_k & 0x003fffff) << 10;
876 msr.hi = (tom_k & 0xffc00000) >> 22;
/* Try to interleave all enabled chip selects on this node.
 * Only possible when every enabled chip select has the same size and
 * bank-address mode and their count is a power of two (2, 4 or 8).
 * On success rewrites all CS base/mask registers and returns the node
 * memory size in kB; the (elided) failure paths bail out early. */
880 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
883 static const uint8_t csbase_low_shift[] = {
886 /* 128MB */ (14 - 4),
887 /* 256MB */ (15 - 4),
888 /* 512MB */ (15 - 4),
893 static const uint8_t csbase_low_d0_shift[] = {
896 /* 128MB */ (14 - 4),
897 /* 128MB */ (15 - 4),
898 /* 256MB */ (15 - 4),
899 /* 512MB */ (15 - 4),
900 /* 256MB */ (16 - 4),
901 /* 512MB */ (16 - 4),
907 /* cs_base_high is not changed */
910 int chip_selects, index;
912 unsigned common_size;
913 unsigned common_cs_mode;
914 uint32_t csbase, csmask;
916 /* See if all of the memory chip selects are the same size
917 * and if so count them.
922 for (index = 0; index < 8; index++) {
927 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
935 if (common_size == 0) {
938 /* The size differed fail */
939 if (common_size != size) {
943 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
944 cs_mode =( value >> ((index>>1)*4)) & 0xf; /* one nibble per DIMM (CS pair) */
945 if (cs_mode == 0 ) continue;
946 if (common_cs_mode == 0) {
947 common_cs_mode = cs_mode;
949 /* The cs_mode differed fail */
950 if (common_cs_mode != cs_mode) {
955 /* Chip selects can only be interleaved when there is
956 * more than one and there is a power of two of them.
958 bits = log2(chip_selects);
959 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
963 /* Find the bits of csbase that we need to interleave on */
964 if (is_cpu_pre_d0()){
965 csbase_inc = 1 << csbase_low_shift[common_cs_mode];
966 if (is_dual_channel(ctrl)) {
967 /* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
968 if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
969 // printk(BIOS_DEBUG, "8 4GB chip selects cannot be interleaved\n");
976 csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
977 if (is_dual_channel(ctrl)) {
978 if ( (bits==3) && (common_cs_mode > 8)) {
979 // printk(BIOS_DEBUG, "8 cs_mode>8 chip selects cannot be interleaved\n");
986 /* Compute the initial values for csbase and csmask.
987 * In csbase just set the enable bit and the base to zero.
988 * In csmask set the mask bits for the size and page level interleave.
991 csmask = (((common_size << bits) - 1) << 21);
992 csmask |= 0xfe00 & ~((csbase_inc << bits) - csbase_inc);
993 for (index = 0; index < 8; index++) {
996 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1001 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1002 pci_write_config32(ctrl->f2, DRAM_CSMASK + (index << 2), csmask);
1003 csbase += csbase_inc; /* next CS interleaves at the next low-bit slot */
1006 printk(BIOS_SPEW, "Interleaved\n");
1008 /* Return the memory size in K */
1009 return common_size << (15 + bits);
/* Non-interleaved fallback: stack the enabled chip selects from the
 * largest downward, rewriting each CS base to the running top-of-memory
 * and each CS mask to the bank size.  Returns total size in kB.
 * tom's low 24 bits track memory in 32MB units; its high 8 bits flag
 * which CS registers have already been placed. */
1012 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1016 /* Remember which registers we have used in the high 8 bits of tom */
1019 /* Find the largest remaining candidate */
1020 unsigned index, candidate;
1021 uint32_t csbase, csmask;
1025 for (index = 0; index < 8; index++) {
1027 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1029 /* Is it enabled? */
1034 /* Is it greater? */
1035 if (value <= csbase) {
1039 /* Has it already been selected */
1040 if (tom & (1 << (index + 24))) {
1043 /* I have a new candidate */
1048 /* See if I have found a new candidate */
1053 /* Remember the dimm size */
1054 size = csbase >> 21; /* size in 32MB units, from the base field */
1056 /* Remember I have used this register */
1057 tom |= (1 << (candidate + 24));
1059 /* Recompute the cs base register value */
1060 csbase = (tom << 21) | 1; /* place at current top, set enable bit */
1062 /* Increment the top of memory */
1065 /* Compute the memory mask */
1066 csmask = ((size -1) << 21);
1067 csmask |= 0xfe00; /* For now don't optimize */
1069 /* Write the new base register */
1070 pci_write_config32(ctrl->f2, DRAM_CSBASE + (candidate << 2), csbase);
1071 /* Write the new mask register */
1072 pci_write_config32(ctrl->f2, DRAM_CSMASK + (candidate << 2), csmask);
1075 /* Return the memory size in K */
1076 return (tom & ~0xff000000) << 15; /* 32MB units -> kB */
/* Scan the DRAM base/limit routing registers of nodes
 * [0, max_node_id) and return the highest mapped address in kB.
 * A base register with both read and write enable bits ((base & 3) == 3)
 * marks an active range. */
1079 static unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1083 /* Find the last memory address used */
1085 for (node_id = 0; node_id < max_node_id; node_id++) {
1086 uint32_t limit, base;
1088 index = node_id << 3;
1089 base = pci_read_config32(ctrl->f1, 0x40 + index);
1090 /* Only look at the limit if the base is enabled */
1091 if ((base & 3) == 3) {
1092 limit = pci_read_config32(ctrl->f1, 0x44 + index);
1093 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2; /* inclusive limit -> kB */
/* Lay out this node's chip selects (interleaved when the CMOS option
 * allows and the hardware qualifies, otherwise stacked), then hook the
 * resulting range into the system map and the TOP_MEM MSRs. */
1099 static void order_dimms(const struct mem_controller *ctrl)
1101 unsigned long tom_k, base_k;
1103 if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1104 tom_k = interleave_chip_selects(ctrl);
1106 printk(BIOS_DEBUG, "Interleaving disabled\n");
1111 tom_k = order_chip_selects(ctrl);
1114 /* Compute the memory base address */
1115 base_k = memory_end_k(ctrl, ctrl->node_id); /* end of the previous nodes' memory */
1117 route_dram_accesses(ctrl, base_k, tom_k);
1118 set_top_mem(tom_k, 0);
/* Disable DIMM `index`: zero both of its chip-select base registers
 * (clearing the enable bits) and drop it from dimm_mask.
 * Returns the updated mask. */
1121 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
1123 printk(BIOS_DEBUG, "disabling dimm %02x\n", index);
1124 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1125 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1126 dimm_mask &= ~(1 << index);
/* Classify the populated DIMMs as registered vs unbuffered from SPD
 * byte 21 bit 1 and program DCL_UnBuffDimm (plus DCL_UpperCSMap on
 * socket-939 revisions) accordingly.  Mixing the two types is fatal.
 * Returns the (unchanged) dimm_mask. */
1130 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
1136 int has_dualch = is_opteron(ctrl);
1140 for (i = 0; (i < DIMM_SOCKETS); i++) {
1142 if (!(dimm_mask & (1 << i))) {
1145 value = spd_read_byte(ctrl->channel0[i], 21); /* SDRAM module attributes */
1150 /* Registered dimm ? */
1151 if (value & (1 << 1)) {
1154 /* Otherwise it must be an unbuffered dimm */
1159 if (unbuffered && registered) {
1160 die("Mixed buffered and registered dimms not supported");
1163 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1164 dcl &= ~DCL_UnBuffDimm;
1166 if ((has_dualch) && (!is_cpu_pre_d0())) {
1167 dcl |= DCL_UnBuffDimm; /* set DCL_DualDIMMen too? */
1169 /* set DCL_En2T if you have non-equal DDR mem types! */
1171 if ((cpuid_eax(1) & 0x30) == 0x30) {
1172 /* CS[7:4] is copy of CS[3:0], should be set for 939 socket */
1173 dcl |= DCL_UpperCSMap;
1176 dcl |= DCL_UnBuffDimm;
1179 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1181 if (is_registered(ctrl)) {
1182 printk(BIOS_SPEW, "Registered\n");
1184 printk(BIOS_SPEW, "Unbuffered\n");
/* Probe SPD byte 2 (memory type) on both channels of every DIMM socket
 * and build a presence bitmap: bit i for channel 0, bit i+DIMM_SOCKETS
 * for channel 1. */
1190 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1195 for (i = 0; i < DIMM_SOCKETS; i++) {
1198 device = ctrl->channel0[i];
1200 byte = spd_read_byte(ctrl->channel0[i], 2); /* Type */
1202 dimm_mask |= (1 << i);
1205 device = ctrl->channel1[i];
1207 byte = spd_read_byte(ctrl->channel1[i], 2);
1209 dimm_mask |= (1 << (i + DIMM_SOCKETS));
/* Enable 128-bit (dual channel) mode when every populated socket has a
 * matching DIMM pair: the presence bitmaps of both channels must be
 * identical, the northbridge must be 128-bit capable, and the listed
 * SPD bytes must compare equal between each channel-0/channel-1 pair.
 * On any mismatch falls through to the single_channel path.
 * Returns the dimm_mask with the channel-1 bits stripped. */
1216 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask)
1220 /* SPD addresses to verify are identical */
1221 static const uint8_t addresses[] = {
1222 2, /* Type should be DDR SDRAM */
1223 3, /* *Row addresses */
1224 4, /* *Column addresses */
1225 5, /* *Physical Banks */
1226 6, /* *Module Data Width low */
1227 7, /* *Module Data Width high */
1228 9, /* *Cycle time at highest CAS Latency CL=X */
1229 11, /* *SDRAM Type */
1230 13, /* *SDRAM Width */
1231 17, /* *Logical Banks */
1232 18, /* *Supported CAS Latencies */
1233 21, /* *SDRAM Module Attributes */
1234 23, /* *Cycle time at CAS Latency (CLX - 0.5) */
1235 26, /* *Cycle time at CAS Latency (CLX - 1.0) */
1236 27, /* *tRP Row precharge time */
1237 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1238 29, /* *tRCD RAS to CAS */
1239 30, /* *tRAS Activate to Precharge */
1240 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1241 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1243 /* If the dimms are not in pairs do not do dual channels */
1244 if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1245 ((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1246 goto single_channel;
1248 /* If the cpu is not capable of doing dual channels don't do dual channels */
1249 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1250 if (!(nbcap & NBCAP_128Bit)) {
1251 goto single_channel;
1253 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1254 unsigned device0, device1;
1257 /* If I don't have a dimm skip this one */
1258 if (!(dimm_mask & (1 << i))) {
1261 device0 = ctrl->channel0[i];
1262 device1 = ctrl->channel1[i];
1263 for (j = 0; j < ARRAY_SIZE(addresses); j++) {
1265 addr = addresses[j];
1266 value0 = spd_read_byte(device0, addr);
1270 value1 = spd_read_byte(device1, addr);
1274 if (value0 != value1) {
1275 goto single_channel;
1279 printk(BIOS_SPEW, "Enabling dual channel memory\n");
1281 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1282 dcl &= ~DCL_32ByteEn;
1283 dcl |= DCL_128BitEn;
1284 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1287 dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS)); /* drop channel-1 bits */
1293 uint8_t divisor; /* In 1/2 ns increments */
1296 uint32_t dch_memclk;
1297 uint16_t dch_tref4k, dch_tref8k;
1300 uint8_t dtl_trwt[3][3]; /* first index is CAS_LAT 2/2.5/3 and 128/registered64/64 */
1301 uint8_t rdpreamble[4]; /* 0 is for registered, 1 for 1-2 DIMMS, 2 and 3 for 3 or 4 unreg dimm slots */
/* Map a minimum SPD cycle time (BCD-ish format: 0x50 = 5.0ns = 200MHz)
 * to the fastest mem_param entry whose cycle time is still reachable.
 * The speed[] table is ordered fastest-last and terminated by an entry
 * with cycle_time 0; hitting the terminator is fatal.
 * NOTE(review): die() message has a typo ("to low" should be "too low")
 * — it is a runtime string, left unchanged here. */
1305 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1307 static const struct mem_param speed[] = {
1311 .divisor = (10 <<1),
1314 .dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1315 .dch_tref4k = DTH_TREF_100MHZ_4K,
1316 .dch_tref8k = DTH_TREF_100MHZ_8K,
1319 .dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1320 .rdpreamble = { ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0), ((9 << 1) + 0) }
1325 .divisor = (7<<1)+1,
1328 .dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1329 .dch_tref4k = DTH_TREF_133MHZ_4K,
1330 .dch_tref8k = DTH_TREF_133MHZ_8K,
1333 .dtl_trwt = { { 2, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1334 .rdpreamble = { ((8 << 1) + 0), ((7 << 1) + 0), ((7 << 1) + 1), ((7 << 1) + 0) }
1342 .dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1343 .dch_tref4k = DTH_TREF_166MHZ_4K,
1344 .dch_tref8k = DTH_TREF_166MHZ_8K,
1347 .dtl_trwt = { { 3, 2, 3 }, { 3, 3, 4 }, { 4, 3, 4 }},
1348 .rdpreamble = { ((7 << 1) + 1), ((6 << 1) + 0), ((6 << 1) + 1), ((6 << 1) + 0) }
1356 .dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1357 .dch_tref4k = DTH_TREF_200MHZ_4K,
1358 .dch_tref8k = DTH_TREF_200MHZ_8K,
1361 .dtl_trwt = { { 0, 2, 3 }, { 3, 3, 4 }, { 3, 3, 4 }},
1362 .rdpreamble = { ((7 << 1) + 0), ((5 << 1) + 0), ((5 << 1) + 1), ((5 << 1) + 1) }
1368 const struct mem_param *param;
1369 for (param = &speed[0]; param->cycle_time ; param++) {
1370 if (min_cycle_time > (param+1)->cycle_time) {
1374 if (!param->cycle_time) {
1375 die("min_cycle_time to low");
1377 printk(BIOS_SPEW, "%s\n", param->name);
/* Result bundle for spd_set_memclk(): the chosen speed parameters plus
 * (in elided members) the surviving dimm_mask. */
1381 struct spd_set_memclk_result {
1382 const struct mem_param *param;
/* Select the memory clock (MEMCLK) and CAS latency to use for all DIMMs.
 *
 * Starts from the fastest clock allowed by both the northbridge
 * capability register and the CMOS "max_mem_clock" option, then walks
 * every populated DIMM's SPD to find the fastest clock / lowest CAS
 * latency combination they all support.  A second pass disables any
 * DIMM that cannot run at the selected settings.  Finally DRAM Config
 * High (memory clock) and DRAM Timing Low (CAS latency) are programmed.
 *
 * Returns the matching mem_param table entry and the (possibly reduced)
 * dimm_mask; on an unrecoverable SPD read error result.param is NULL
 * and result.dimm_mask is -1.
 */
static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask)
	/* Compute the minimum cycle time for these dimms */
	struct spd_set_memclk_result result;
	unsigned min_cycle_time, min_latency, bios_cycle_time;
	/* SPD bytes holding the minimum cycle time at successively higher
	 * CAS latencies, lowest CL first.
	 * NOTE(review): assumed ordering — confirm against the JEDEC DDR
	 * SPD layout. */
	static const uint8_t latency_indicies[] = { 26, 23, 9 };
	/* Cycle times use the SPD encoding: high nibble = whole ns,
	 * low nibble = tenths of ns (0x75 == 7.5ns). */
	static const unsigned char min_cycle_times[] = {
		[NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
		[NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
		[NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
		[NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
	/* Fastest clock the memory controller itself supports. */
	value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
	min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
	/* Honor the user's CMOS clock limit when it is slower. */
	bios_cycle_time = min_cycle_times[
		read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
	if (bios_cycle_time > min_cycle_time) {
		min_cycle_time = bios_cycle_time;
	/* Compute the least latency with the fastest clock supported
	 * by both the memory controller and the dimms.
	for (i = 0; i < DIMM_SOCKETS; i++) {
		int new_cycle_time, new_latency;
		if (!(dimm_mask & (1 << i))) {
		/* First find the supported CAS latencies
		 * Byte 18 for DDR SDRAM is interpreted:
		 * bit 0 == CAS Latency = 1.0
		 * bit 1 == CAS Latency = 1.5
		 * bit 2 == CAS Latency = 2.0
		 * bit 3 == CAS Latency = 2.5
		 * bit 4 == CAS Latency = 3.0
		 * bit 5 == CAS Latency = 3.5
		new_cycle_time = 0xa0;
		latencies = spd_read_byte(ctrl->channel0[i], 18);
		if (latencies <= 0) continue;
		/* Compute the lowest cas latency supported */
		latency = log2(latencies) -2;
		/* Loop through and find a fast clock with a low latency */
		for (index = 0; index < 3; index++, latency++) {
			/* Only CL 2, 2.5 and 3 are usable here. */
			if ((latency < 2) || (latency > 4) ||
				(!(latencies & (1 << latency)))) {
			spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
			if (spd_value < 0) {
			/* Only increase the latency if we decrease the clock */
			if ((spd_value >= min_cycle_time) && (spd_value < new_cycle_time)) {
				new_cycle_time = spd_value;
				new_latency = latency;
		if (new_latency > 4){
		/* Does min_latency need to be increased? */
		if (new_cycle_time > min_cycle_time) {
			min_cycle_time = new_cycle_time;
		/* Does min_cycle_time need to be increased? */
		if (new_latency > min_latency) {
			min_latency = new_latency;
	/* Make a second pass through the dimms and disable
	 * any that cannot support the selected memclk and cas latency.
	for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
		if (!(dimm_mask & (1 << i))) {
		latencies = spd_read_byte(ctrl->channel0[i], 18);
		if (latencies < 0) goto hw_error;
		if (latencies == 0) {
		/* Compute the lowest cas latency supported */
		latency = log2(latencies) -2;
		/* Walk through searching for the selected latency */
		for (index = 0; index < 3; index++, latency++) {
			if (!(latencies & (1 << latency))) {
			if (latency == min_latency)
		/* If I can't find the latency or my index is bad error */
		if ((latency != min_latency) || (index >= 3)) {
		/* Read the min_cycle_time for this latency */
		spd_value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
		if (spd_value < 0) goto hw_error;
		/* All is good if the selected clock speed
		 * is what I need or slower.
		if (spd_value <= min_cycle_time) {
		/* Otherwise I have an error, disable the dimm */
		dimm_mask = disable_dimm(ctrl, i, dimm_mask);
	//down speed for full load 4 rank support
#if CONFIG_QRANK_DIMM_SUPPORT
	/* All sockets on both channels populated: cap the clock at 166MHz
	 * (0x60 == 6ns) to keep the bus loading within spec.
	 * NOTE(review): byte 5 is the SPD rank count — confirm the
	 * (not visible here) rank check before trusting this path. */
	if (dimm_mask == (3|(3<<DIMM_SOCKETS)) ) {
		for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
			if (!(dimm_mask & (1 << i))) {
			val = spd_read_byte(ctrl->channel0[i], 5);
			if (min_cycle_time <= 0x50 ) {
				min_cycle_time = 0x60;
	/* Now that I know the minimum cycle time lookup the memory parameters */
	result.param = get_mem_param(min_cycle_time);
	/* Update DRAM Config High with our selected memory speed */
	value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
	value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
	/* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
	if (!is_cpu_pre_e0()) {
		if (min_cycle_time==0x50) {
	value |= result.param->dch_memclk;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
	static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
	/* Update DRAM Timing Low with our selected cas latency */
	value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
	value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
	result.dimm_mask = dimm_mask;
	/* Unrecoverable SPD failure: signal the caller with param == NULL
	 * and dimm_mask == -1. */
	result.param = (const struct mem_param *)0;
	result.dimm_mask = -1;
/* Program tRC (SPD byte 41, whole ns) for DIMM i into DRAM Timing Low.
 * The SPD nanosecond value is converted to MEMCLKs rounding up, clamped
 * to the controller minimum, and only ever raised relative to the value
 * already in the register so the slowest DIMM checked so far wins.
 * Returns -1 on an SPD read failure.
 */
static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
	unsigned clocks, old_clocks;
	value = spd_read_byte(ctrl->channel0[i], 41);
	if (value < 0) return -1;
	if ((value == 0) || (value == 0xff)) {
	/* ns -> clocks, rounding up.  NOTE(review): divisor appears to be
	 * in half-ns-per-clock units — confirm against get_mem_param. */
	clocks = ((value << 1) + param->divisor - 1)/param->divisor;
	if (clocks < DTL_TRC_MIN) {
		clocks = DTL_TRC_MIN;
	if (clocks > DTL_TRC_MAX) {
	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
	/* Never relax a tighter constraint set by a previously-checked DIMM. */
	if (old_clocks > clocks) {
		clocks = old_clocks;
	dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
	dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRFC (SPD byte 42, whole ns) for DIMM i into DRAM Timing Low.
 * Same convert/clamp/max-merge pattern as update_dimm_Trc; a missing or
 * all-ones SPD byte falls back to param->tRFC.
 * Returns -1 on an SPD read failure.
 */
static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
	unsigned clocks, old_clocks;
	value = spd_read_byte(ctrl->channel0[i], 42);
	if (value < 0) return -1;
	if ((value == 0) || (value == 0xff)) {
		/* SPD does not report tRFC; use the table default. */
		value = param->tRFC;
	/* ns -> clocks, rounding up. */
	clocks = ((value << 1) + param->divisor - 1)/param->divisor;
	if (clocks < DTL_TRFC_MIN) {
		clocks = DTL_TRFC_MIN;
	if (clocks > DTL_TRFC_MAX) {
	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
	/* Keep the slowest DIMM's requirement. */
	if (old_clocks > clocks) {
		clocks = old_clocks;
	dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
	dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRCD (SPD byte 29) for DIMM i into DRAM Timing Low.
 * NOTE(review): byte 29 is presumably in quarter-ns units (hence the
 * divisor << 1 against the raw value) — confirm against the JEDEC DDR
 * SPD spec.  Convert/clamp/max-merge as in update_dimm_Trc.
 * Returns -1 on an SPD read failure.
 */
static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
	unsigned clocks, old_clocks;
	value = spd_read_byte(ctrl->channel0[i], 29);
	if (value < 0) return -1;
	/* quarter-ns -> clocks, rounding up. */
	clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
	if (clocks < DTL_TRCD_MIN) {
		clocks = DTL_TRCD_MIN;
	if (clocks > DTL_TRCD_MAX) {
	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
	/* Keep the slowest DIMM's requirement. */
	if (old_clocks > clocks) {
		clocks = old_clocks;
	dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
	dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRRD (SPD byte 28) for DIMM i into DRAM Timing Low.
 * NOTE(review): byte 28 is presumably in quarter-ns units — confirm
 * against the JEDEC DDR SPD spec.  Convert/clamp/max-merge as in
 * update_dimm_Trc.  Returns -1 on an SPD read failure.
 */
static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
	unsigned clocks, old_clocks;
	value = spd_read_byte(ctrl->channel0[i], 28);
	if (value < 0) return -1;
	/* quarter-ns -> clocks, rounding up. */
	clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
	if (clocks < DTL_TRRD_MIN) {
		clocks = DTL_TRRD_MIN;
	if (clocks > DTL_TRRD_MAX) {
	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
	/* Keep the slowest DIMM's requirement. */
	if (old_clocks > clocks) {
		clocks = old_clocks;
	dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
	dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRAS (SPD byte 30, whole ns) for DIMM i into DRAM Timing Low.
 * Convert/clamp/max-merge as in update_dimm_Trc.
 * Returns -1 on an SPD read failure.
 */
static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
	unsigned clocks, old_clocks;
	value = spd_read_byte(ctrl->channel0[i], 30);
	if (value < 0) return -1;
	/* ns -> clocks, rounding up. */
	clocks = ((value << 1) + param->divisor - 1)/param->divisor;
	if (clocks < DTL_TRAS_MIN) {
		clocks = DTL_TRAS_MIN;
	if (clocks > DTL_TRAS_MAX) {
	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
	/* Keep the slowest DIMM's requirement. */
	if (old_clocks > clocks) {
		clocks = old_clocks;
	dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
	dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tRP (SPD byte 27) for DIMM i into DRAM Timing Low.
 * NOTE(review): byte 27 is presumably in quarter-ns units — confirm
 * against the JEDEC DDR SPD spec.  Convert/clamp/max-merge as in
 * update_dimm_Trc.  Returns -1 on an SPD read failure.
 */
static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
	unsigned clocks, old_clocks;
	value = spd_read_byte(ctrl->channel0[i], 27);
	if (value < 0) return -1;
	/* quarter-ns -> clocks, rounding up. */
	clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
	if (clocks < DTL_TRP_MIN) {
		clocks = DTL_TRP_MIN;
	if (clocks > DTL_TRP_MAX) {
	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
	/* Keep the slowest DIMM's requirement. */
	if (old_clocks > clocks) {
		clocks = old_clocks;
	dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
	dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program tWR (write recovery) from the mem_param table into
 * DRAM Timing Low.  Not per-DIMM: uses the selected param directly.
 */
static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
	dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Initialize the refresh rate (Tref) in DRAM Timing High to the 4K-row
 * default; update_dimm_Tref() may later switch it to the 8K-row value.
 */
static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
	dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
	dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Select the refresh rate (Tref) for DIMM i based on SPD byte 3
 * (number of row address bits) and merge it with what is already
 * programmed: once any DIMM needs the 8K-row rate, the register stays
 * at 8K.  Returns -1 on an SPD read failure.
 */
static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
	unsigned tref, old_tref;
	value = spd_read_byte(ctrl->channel0[i], 3);
	if (value < 0) return -1;
		tref = param->dch_tref8k;
		tref = param->dch_tref4k;
	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
	old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
	/* 12 row bits only needs the 4K rate, and only if no earlier DIMM
	 * already forced 8K. */
	if ((value == 12) && (old_tref == param->dch_tref4k)) {
		tref = param->dch_tref4k;
		tref = param->dch_tref8k;
	dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
	dth |= (tref << DTH_TREF_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Flag DIMM i as using x4 DRAM devices in DRAM Config Low when SPD
 * byte 13 (primary SDRAM width) indicates x4 parts.  With quad-rank
 * support the flag for the upper rank pair (i+2) is set as well, based
 * on SPD byte 5 (number of physical banks/ranks).
 */
static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
#if CONFIG_QRANK_DIMM_SUPPORT
	value = spd_read_byte(ctrl->channel0[i], 13);
#if CONFIG_QRANK_DIMM_SUPPORT
	rank = spd_read_byte(ctrl->channel0[i], 5); /* number of physical banks */
	dimm = 1<<(DCL_x4DIMM_SHIFT+i);
#if CONFIG_QRANK_DIMM_SUPPORT
		/* Quad-rank: the extra rank pair occupies chip-select i+2. */
		dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Check SPD byte 11 (DIMM configuration type) for DIMM i and clear the
 * controller-wide ECC enable in DRAM Config Low when this DIMM cannot
 * provide ECC — one non-ECC DIMM disables ECC for the whole controller.
 * NOTE(review): the byte-11 test itself is outside this view — confirm.
 */
static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
	value = spd_read_byte(ctrl->channel0[i], 11);
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	dcl &= ~DCL_DimmEccEn;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count populated DIMMs by scanning the even chip-select base registers
 * (one register pair per DIMM, hence the step of 2).
 */
static int count_dimms(const struct mem_controller *ctrl)
	for (index = 0; index < 8; index += 2) {
		csbase = pci_read_config32(ctrl->f2, (DRAM_CSBASE + (index << 2)));
/* Program tWTR (write-to-read delay) from the mem_param table into
 * DRAM Timing High.
 */
static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
	dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
	dth |= ((param->dtl_twtr - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program tRWT (read-to-write turnaround) in DRAM Timing High.
 * The clock count is looked up from param->dtl_trwt, indexed by the CAS
 * latency already programmed in DRAM Timing Low and the memory
 * interface type (dual-channel / registered / unbuffered).
 * Dies if the latency or the table value is out of range.
 */
static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
	latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
	if (is_opteron(ctrl)) {
		mtype = 0; /* dual channel */
	} else if (is_registered(ctrl)) {
		mtype = 1; /* registered 64bit interface */
		mtype = 2; /* unbuffered 64bit interface */
		die("Unknown LAT for Trwt");
	clocks = param->dtl_trwt[lat][mtype];
	if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
		die("Unknown Trwt\n");
	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
	dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
	dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program tWCL (write CAS latency) in DRAM Timing High.  The clock
 * count depends on whether the DIMMs are registered (value chosen in
 * the branch outside this view).
 */
static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
	/* Memory Clocks after CAS# */
	if (is_registered(ctrl)) {
	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
	dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
	dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Program the read preamble in DRAM Config High.  The preamble value is
 * taken from param->rdpreamble, indexed by bus loading: registered
 * DIMMs get one entry, otherwise the number of occupied slots selects
 * progressively longer preambles.  Dies on an out-of-range value.
 */
static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
	unsigned rdpreamble;
	/* Count the populated sockets to estimate the bus loading. */
	for (i = 0; i < 4; i++) {
		if (ctrl->channel0[i]) {
	/* map to index to param.rdpreamble array */
	if (is_registered(ctrl)) {
	} else if (slots < 3) {
	} else if (slots == 3) {
	} else if (slots == 4) {
		die("Unknown rdpreamble for this nr of slots");
	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
	dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
	rdpreamble = param->rdpreamble[i];
	if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
		die("Unknown rdpreamble");
	dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the maximum asynchronous latency in DRAM Config High.  The
 * latency is chosen from the DIMM count and whether the DIMMs are
 * registered; too many unbuffered DIMMs is fatal.
 */
static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
	dimms = count_dimms(ctrl);
	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
	dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
	if (is_registered(ctrl)) {
		die("Too many unbuffered dimms");
	else if (dimms == 3) {
	dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the dynamic idle cycle counter in DRAM Config High with the
 * AMD-recommended fixed value (limit of 16, counter enabled).
 */
static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
	/* AMD says to Hardcode this */
	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
	dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
	dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
	dch |= DCH_DYN_IDLE_CTR_EN;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Apply all SPD-derived DRAM timings for one controller.
 *
 * Per-DIMM timings (tRC, tRFC, tRCD, tRRD, tRAS, tRP, Tref, x4 width,
 * ECC) are merged across every enabled DIMM; a DIMM whose SPD cannot be
 * read or validated (rc <= 0) is disabled and dropped from the mask.
 * Global timings (tWR, tWTR, tRWT, tWCL, read preamble, async latency,
 * idle cycle limit) are then set from the selected mem_param.
 * Returns the (possibly reduced) dimm_mask.
 */
static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask)
	init_Tref(ctrl, param);
	for (i = 0; i < DIMM_SOCKETS; i++) {
		if (!(dimm_mask & (1 << i))) {
		/* DRAM Timing Low Register */
		if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
		if ((rc = update_dimm_Trfc(ctrl, param, i)) <= 0) goto dimm_err;
		if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
		if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
		if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
		if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
		/* DRAM Timing High Register */
		if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
		/* DRAM Config Low */
		if ((rc = update_dimm_x4 (ctrl, param, i)) <= 0) goto dimm_err;
		if ((rc = update_dimm_ecc(ctrl, param, i)) <= 0) goto dimm_err;
		/* A DIMM failed validation: drop it from the mask. */
		dimm_mask = disable_dimm(ctrl, i, dimm_mask);
	/* DRAM Timing Low Register */
	set_Twr(ctrl, param);
	/* DRAM Timing High Register */
	set_Twtr(ctrl, param);
	set_Trwt(ctrl, param);
	set_Twcl(ctrl, param);
	/* DRAM Config High */
	set_read_preamble(ctrl, param);
	set_max_async_latency(ctrl, param);
	set_idle_cycle_limit(ctrl, param);
/* Top-level per-controller SPD path: detect the DIMMs, enable
 * dual-channel mode and ECC where possible, size the RAM, pick the
 * memory clock/CAS latency, and program all DRAM timings.  An
 * unrecoverable SPD error ends in a reset.
 */
#if CONFIG_RAMINIT_SYSINFO
static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
static void sdram_set_spd_registers(const struct mem_controller *ctrl)
	struct spd_set_memclk_result result;
	const struct mem_param *param;
	if (!controller_present(ctrl)) {
//		printk(BIOS_DEBUG, "No memory controller present\n");
	hw_enable_ecc(ctrl);
	activate_spd_rom(ctrl);
	dimm_mask = spd_detect_dimms(ctrl);
	/* Only the channel-0 bits matter for "is any memory present". */
	if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
		printk(BIOS_DEBUG, "No memory for this cpu\n");
	dimm_mask = spd_enable_2channels(ctrl, dimm_mask);
	dimm_mask = spd_set_ram_size(ctrl , dimm_mask);
	dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask);
	result = spd_set_memclk(ctrl, dimm_mask);
	param = result.param;
	dimm_mask = result.dimm_mask;
	dimm_mask = spd_set_dram_timing(ctrl, param , dimm_mask);
	/* Unrecoverable error reading SPD data */
	printk(BIOS_ERR, "SPD error - reset\n");
2109 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* Hoist the DRAM that would sit under the hardware memory hole on node
 * i up above 4GB.  Every node's DRAM base/limit registers above node i
 * are shifted up by the hoisted amount, node i's limit is extended, and
 * the node's DRAM Hole Address Register (F1 offset 0xf0) is programmed
 * with the hole start and the hole offset.  Register address units:
 * base/limit bits [31:16] hold address bits [39:24], so a size in KiB
 * is converted with "<< 2".
 */
static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
	uint32_t carry_over;
	uint32_t base, limit;
	/* Amount of memory (in KiB) displaced from below 4GB. */
	carry_over = (4*1024*1024) - hole_startk;
	/* Shift every node above node i up by carry_over. */
	for (ii=controllers - 1;ii>i;ii--) {
		base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
		/* Skip nodes whose DRAM range is not read/write enabled. */
		if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
		limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
		/* The DRAM map must be identical on every node. */
		for (j = 0; j < controllers; j++) {
			pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
			pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
	/* Extend node i's own limit by the hoisted amount. */
	limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
	for (j = 0; j < controllers; j++) {
		pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
	base = pci_read_config32(dev, 0x40 + (i << 3));
	basek = (base & 0xffff0000) >> 2;
	if (basek == hole_startk) {
		//don't need to set the memhole here, because the hole offset would
		//be 0 and overflow; change the base reg instead, so the new basek
		//becomes 4*1024*1024
		base |= (4*1024*1024)<<2;
		for (j = 0; j < controllers; j++) {
			pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
	hoist = /* hole start address */
		((hole_startk << 10) & 0xff000000) +
		/* hole address to memory controller address */
		(((basek + carry_over) >> 6) & 0x0000ff00) +
	pci_write_config32(dev, 0xf0, hoist);
/* Configure the hardware memory hole below 4GB.
 *
 * The hole start defaults to 4GB minus CONFIG_HW_MEM_HOLE_SIZEK.  With
 * auto-increase enabled the start is nudged down if it would coincide
 * exactly with a node's DRAM base (which hoist_memory cannot encode).
 * The node whose DRAM range straddles the hole start is then hoisted
 * and TOP_MEM/TOP_MEM2 are updated.  Only a single hole is supported.
 */
static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
	uint32_t hole_startk;
	hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK;
	printk(BIOS_SPEW, "Handling memory hole at 0x%08x (default)\n", hole_startk);
#if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
	/* We need to double check if hole_startk is valid.
	 * If it is equal to the dram base address in K (base_k),
	 * we need to decrease it.
	for (i=0; i<controllers; i++) {
		base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
		/* Skip nodes whose DRAM range is not read/write enabled. */
		if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
		base_k = (base & 0xffff0000) >> 2;
		if (base_k == hole_startk) {
			/* decrease memory hole startk to make sure it is
			 * in the middle of the previous node
			hole_startk -= (base_k - basek_pri)>>1;
			break; /* only one hole */
	printk(BIOS_SPEW, "Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
	/* Find node number that needs the memory hole configured */
	for (i=0; i<controllers; i++) {
		uint32_t base, limit;
		unsigned base_k, limit_k;
		base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
		if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
		limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
		base_k = (base & 0xffff0000) >> 2;
		limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
		if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
			hoist_memory(controllers, ctrl, hole_startk, i);
			end_k = memory_end_k(ctrl, controllers);
			set_top_mem(end_k, hole_startk);
			break; /* only one hole */
/* Polling budget for the DramInit-complete loop. */
#define TIMEOUT_LOOPS 300000
/* Start DRAM initialization on all controllers and wait for completion.
 *
 * Sequence: validate the memory clocks and disable the DRAM receivers,
 * pulse the DIMM resets via memreset(), configure ECC/chipkill and
 * toggle DisDqsHys per controller, then set DramInit and poll until the
 * hardware clears it (bounded by TIMEOUT_LOOPS).  On post-C0 parts it
 * additionally waits for the memory-clear status before touching
 * memory, and on E0+ parts sets up the hardware memory hole.
 */
#if CONFIG_RAMINIT_SYSINFO
static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
static void sdram_enable(int controllers, const struct mem_controller *ctrl)
	/* Error if I don't have memory */
	if (memory_end_k(ctrl, controllers) == 0) {
	/* Before enabling memory start the memory clocks */
	for (i = 0; i < controllers; i++) {
		if (!controller_present(ctrl + i))
		dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
		if (dch & (DCH_MEMCLK_EN0|DCH_MEMCLK_EN1|DCH_MEMCLK_EN2|DCH_MEMCLK_EN3)) {
			dch |= DCH_MEMCLK_VALID;
			pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
		/* Disable dram receivers */
		dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
		dcl |= DCL_DisInRcvrs;
		pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
	/* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
	/* And if necessary toggle the reset on the dimms by hand */
	memreset(controllers, ctrl);
	for (i = 0; i < controllers; i++) {
		if (!controller_present(ctrl + i))
		/* Skip everything if I don't have any memory on this controller */
		dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
		if (!(dch & DCH_MEMCLK_VALID)) {
		/* Toggle DisDqsHys to get it working */
		dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
		if (dcl & DCL_DimmEccEn) {
			printk(BIOS_SPEW, "ECC enabled\n");
			mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
			/* Chipkill needs the full 128-bit (dual-channel) bus. */
			if (dcl & DCL_128BitEn) {
				mnc |= MNC_CHIPKILL_EN;
			pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
		dcl |= DCL_DisDqsHys;
		pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
		dcl &= ~DCL_DisDqsHys;
		dcl &= ~DCL_DLL_Disable;
		/* Kick off hardware DRAM initialization. */
		dcl |= DCL_DramInit;
		pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
	for (i = 0; i < controllers; i++) {
		if (!controller_present(ctrl + i))
		/* Skip everything if I don't have any memory on this controller */
		dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
		if (!(dch & DCH_MEMCLK_VALID)) {
		printk(BIOS_DEBUG, "Initializing memory: ");
			dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
			if ((loops & 1023) == 0) {
				printk(BIOS_DEBUG, ".");
		/* Hardware clears DramInit when initialization finishes. */
		} while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
		if (loops >= TIMEOUT_LOOPS) {
			printk(BIOS_DEBUG, " failed\n");
		if (!is_cpu_pre_c0()) {
			/* Wait until it is safe to touch memory */
			dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
			pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
				dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
			} while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
		printk(BIOS_DEBUG, " done\n");
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	// init hw mem hole here
	/* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
	if (!is_cpu_pre_e0())
		set_hw_mem_hole(controllers, ctrl);
	//FIXME add enable node interleaving here -- yhlu
	1. check how many nodes we have , if not all has ram installed get out
	2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
	3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
	4. if all ready enable node_interleaving in f1 0x40..... of every node
	5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
/* Record the given flag value in the RAM-based sysinfo area.
 * NOTE(review): body not visible here — confirm what it stores. */
static void set_sysinfo_in_ram(unsigned val)
2348 void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
2349 const uint16_t *spd_addr)
2353 struct mem_controller *ctrl;
2354 for (i=0;i<controllers; i++) {
2357 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
2358 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
2359 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
2360 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
2362 if (spd_addr == (void *)0) continue;
2364 for (j=0;j<DIMM_SOCKETS;j++) {
2365 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
2366 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];