2 * This file is part of the coreboot project.
4 * Copyright (C) 2002 Linux Networx
5 * (Written by Eric Biederman <ebiederman@lnxi.com> for Linux Networx)
6 * Copyright (C) 2004 YingHai Lu
7 * Copyright (C) 2008 Advanced Micro Devices, Inc.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 #include <cpu/x86/mem.h>
24 #include <cpu/x86/cache.h>
25 #include <cpu/x86/mtrr.h>
26 #include <cpu/x86/tsc.h>
32 #ifndef QRANK_DIMM_SUPPORT
33 #define QRANK_DIMM_SUPPORT 0
/* Print a label string followed by a 32-bit value in hex on the debug
 * console.  Two build variants: printk in cache-as-RAM (CAR) builds,
 * the romcc-style print helpers otherwise.
 * NOTE(review): the #else/#endif lines of this function are not visible
 * in this chunk. */
36 static inline void print_raminit(const char *strval, uint32_t val)
38 #if CONFIG_USE_PRINTK_IN_CAR
39 printk_debug("%s%08x\r\n", strval, val);
41 print_debug(strval); print_debug_hex32(val); print_debug("\r\n");
45 #define RAM_TIMING_DEBUG 0
/* Timing-debug print of a label/value pair; compiled to a no-op unless
 * RAM_TIMING_DEBUG is set to 1 above. */
47 static inline void print_tx(const char *strval, uint32_t val)
49 #if RAM_TIMING_DEBUG == 1
50 print_raminit(strval, val);
/* Timing-debug print of a plain string; active only when
 * RAM_TIMING_DEBUG == 1.  Body elided in this chunk. */
55 static inline void print_t(const char *strval)
57 #if RAM_TIMING_DEBUG == 1
64 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
65 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
68 #include "amdk8_f_pci.c"
71 /* for PCI_ADDR(0, 0x18, 2, 0x98) index,
72 and PCI_ADDR(0x, 0x18, 2, 0x9c) data */
75 [29: 0] DctOffset (Dram Controller Offset)
76 [30:30] DctAccessWrite (Dram Controller Read/Write Select)
79 [31:31] DctAccessDone (Dram Controller Access Done)
80 0 = Access in progress
81 1 = No access in progress
84 [31: 0] DctOffsetData (Dram Controller Offset Data)
87 - Write the register num to DctOffset with DctAccessWrite = 0
89 - poll the DctAccessDone until it = 1
90 - Read the data from DctOffsetData
92 - Write the data to DctOffsetData
93 - Write register num to DctOffset with DctAccessWrite = 1
94 - poll the DctAccessDone until it = 1
/* Apply a table of (PCI address, AND mask, OR value) triples: each
 * addressed config register is read, masked, OR-ed and written back.
 * max is the total number of table entries (a multiple of 3). */
98 static void setup_resource_map(const unsigned int *register_values, int max)
101 for (i = 0; i < max; i += 3) {
105 dev = register_values[i] & ~0xff;   /* upper bits select the PCI device */
106 where = register_values[i] & 0xff;  /* low byte is the config-space offset */
107 reg = pci_read_config32(dev, where);
108 reg &= register_values[i+1];        /* clear bits we intend to program */
109 reg |= register_values[i+2];        /* set the new field values */
110 pci_write_config32(dev, where, reg);
114 static int controller_present(const struct mem_controller *ctrl)
116 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
/* Program the static northbridge DRAM register set for one node's
 * memory controller.  register_values holds triples of
 * (PCI address, AND mask, OR value); each target register is
 * read-modify-written as reg = (reg & mask) | value, with the PCI
 * device remapped from node 0 (0:18.x) to this controller's node.
 * The block comments document the K8 (Fam 0Fh) DRAM register bit
 * fields; see the AMD BKDG for the authoritative definitions.
 * Also records in sysinfo whether this node's controller is present. */
119 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
121 static const unsigned int register_values[] = {
123 /* Careful set limit registers before base registers which
124 contain the enables */
125 /* DRAM Limit i Registers
134 * [ 2: 0] Destination Node ID
144 * [10: 8] Interleave select
145 * specifies the values of A[14:12] to use with interleave enable.
147 * [31:16] DRAM Limit Address i Bits 39-24
148 * This field defines the upper address bits of a 40 bit address
149 * that define the end of the DRAM region.
151 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
152 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
153 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
154 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
155 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
156 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
157 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
158 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
159 /* DRAM Base i Registers
168 * [ 0: 0] Read Enable
171 * [ 1: 1] Write Enable
172 * 0 = Writes Disabled
175 * [10: 8] Interleave Enable
176 * 000 = No interleave
177 * 001 = Interleave on A[12] (2 nodes)
179 * 011 = Interleave on A[12] and A[14] (4 nodes)
183 * 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
185 * [31:16] DRAM Base Address i Bits 39-24
186 * This field defines the upper address bits of a 40-bit address
187 * that define the start of the DRAM region.
189 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
190 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
191 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
192 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
193 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
194 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
195 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
196 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
198 /* DRAM CS Base Address i Registers
207 * [ 0: 0] Chip-Select Bank Enable
211 * [ 2: 2] Memory Test Failed
213 * [13: 5] Base Address (21-13)
214 * An optimization used when all DIMM are the same size...
216 * [28:19] Base Address (36-27)
217 * This field defines the top 11 addresses bit of a 40-bit
218 * address that define the memory address space. These
219 * bits decode 32-MByte blocks of memory.
222 PCI_ADDR(0, 0x18, 2, 0x40), 0xe007c018, 0x00000000,
223 PCI_ADDR(0, 0x18, 2, 0x44), 0xe007c018, 0x00000000,
224 PCI_ADDR(0, 0x18, 2, 0x48), 0xe007c018, 0x00000000,
225 PCI_ADDR(0, 0x18, 2, 0x4C), 0xe007c018, 0x00000000,
226 PCI_ADDR(0, 0x18, 2, 0x50), 0xe007c018, 0x00000000,
227 PCI_ADDR(0, 0x18, 2, 0x54), 0xe007c018, 0x00000000,
228 PCI_ADDR(0, 0x18, 2, 0x58), 0xe007c018, 0x00000000,
229 PCI_ADDR(0, 0x18, 2, 0x5C), 0xe007c018, 0x00000000,
230 /* DRAM CS Mask Address i Registers
235 * Select bits to exclude from comparison with the DRAM Base address register.
237 * [13: 5] Address Mask (21-13)
238 * Address to be excluded from the optimized case
240 * [28:19] Address Mask (36-27)
241 * The bits with an address mask of 1 are excluded from address comparison
245 PCI_ADDR(0, 0x18, 2, 0x60), 0xe007c01f, 0x00000000,
246 PCI_ADDR(0, 0x18, 2, 0x64), 0xe007c01f, 0x00000000,
247 PCI_ADDR(0, 0x18, 2, 0x68), 0xe007c01f, 0x00000000,
248 PCI_ADDR(0, 0x18, 2, 0x6C), 0xe007c01f, 0x00000000,
250 /* DRAM Control Register
252 * [ 3: 0] RdPtrInit ( Read Pointer Initial Value)
253 * 0x03-0x00: reserved
254 * [ 6: 4] RdPadRcvFifoDly (Read Delay from Pad Receive FIFO)
257 * 010 = 1.5 Memory Clocks
258 * 011 = 2 Memory Clocks
259 * 100 = 2.5 Memory Clocks
260 * 101 = 3 Memory Clocks
261 * 110 = 3.5 Memory Clocks
264 * [16:16] AltVidC3MemClkTriEn (AltVID Memory Clock Tristate Enable)
265 * Enables the DDR memory clocks to be tristated when alternate VID
266 * mode is enabled. This bit has no effect if the DisNbClkRamp bit
268 * [17:17] DllTempAdjTime (DLL Temperature Adjust Cycle Time)
271 * [18:18] DqsRcvEnTrain (DQS Receiver Enable Training Mode)
272 * 0 = Normal DQS Receiver enable operation
273 * 1 = DQS receiver enable training mode
276 PCI_ADDR(0, 0x18, 2, 0x78), 0xfff80000, (6<<4)|(6<<0),
278 /* DRAM Initialization Register
280 * [15: 0] MrsAddress (Address for MRS/EMRS Commands)
281 * this field specifies the data driven on the DRAM address pins
282 * 15-0 for MRS and EMRS commands
283 * [18:16] MrsBank (Bank Address for MRS/EMRS Commands)
284 * this field specifies the data driven on the DRAM bank pins for
285 * the MRS and EMRS commands
287 * [24:24] SendPchgAll (Send Precharge All Command)
288 * Setting this bit causes the DRAM controller to send a precharge
289 * all command. This bit is cleared by the hardware after the
291 * [25:25] SendAutoRefresh (Send Auto Refresh Command)
292 * Setting this bit causes the DRAM controller to send an auto
293 * refresh command. This bit is cleared by the hardware after the
295 * [26:26] SendMrsCmd (Send MRS/EMRS Command)
296 * Setting this bit causes the DRAM controller to send the MRS or
297 * EMRS command defined by the MrsAddress and MrsBank fields. This
298 * bit is cleared by the hardware after the command completes
299 * [27:27] DeassertMemRstX (De-assert Memory Reset)
300 * Setting this bit causes the DRAM controller to de-assert the
301 * memory reset pin. This bit cannot be used to assert the memory
303 * [28:28] AssertCke (Assert CKE)
304 * setting this bit causes the DRAM controller to assert the CKE
305 * pins. This bit cannot be used to de-assert the CKE pins
307 * [31:31] EnDramInit (Enable DRAM Initialization)
308 * Setting this bit puts the DRAM controller in a BIOS controlled
309 * DRAM initialization mode. BIOS must clear this bit after DRAM
310 * initialization is complete.
312 // PCI_ADDR(0, 0x18, 2, 0x7C), 0x60f80000, 0,
315 /* DRAM Bank Address Mapping Register
317 * Specify the memory module size
337 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff0000, 0x00000000,
338 /* DRAM Timing Low Register
340 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
350 * [ 5: 4] Trcd (Ras#-active to Cas# read/write delay)
356 * [ 9: 8] Trp (Row Precharge Time, Precharge-to-Active or Auto-Refresh)
362 * [11:11] Trtp (Read to Precharge Time, read Cas# to precharge time)
363 * 0 = 2 clocks for Burst Length of 32 Bytes
364 * 4 clocks for Burst Length of 64 Bytes
365 * 1 = 3 clocks for Burst Length of 32 Bytes
366 * 5 clocks for Burst Length of 64 Bytes
367 * [15:12] Tras (Minimum Ras# Active Time)
370 * 0010 = 5 bus clocks
372 * 1111 = 18 bus clocks
373 * [19:16] Trc (Row Cycle Time, Ras#-active to Ras#-active or auto
374 * refresh of the same bank)
375 * 0000 = 11 bus clocks
376 * 0010 = 12 bus clocks
378 * 1110 = 25 bus clocks
379 * 1111 = 26 bus clocks
380 * [21:20] Twr (Write Recovery Time, From the last data to precharge,
381 * writes can go back-to-back)
386 * [23:22] Trrd (Active-to-active(Ras#-to-Ras#) Delay of different banks)
391 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel A,
392 * BIOS should set it to reduce the power consumption)
393 * Bit F(1207) M2 Package S1g1 Package
395 * 1 N/A MA0_CLK1 MA0_CLK1
398 * 4 MA1_CLK MA1_CLK0 N/A
399 * 5 MA0_CLK MA0_CLK0 MA0_CLK0
401 * 7 N/A MA0_CLK2 MA0_CLK2
403 PCI_ADDR(0, 0x18, 2, 0x88), 0x000004c8, 0xff000002 /* 0x03623125 */ ,
404 /* DRAM Timing High Register
407 * [ 6: 4] TrwtTO (Read-to-Write Turnaround for Data, DQS Contention)
417 * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay,
418 * minimum write-to-read delay when both access the same chip select)
423 * [11:10] Twrrd (Write to Read DIMM Termination Turnaround, minimum
424 * write-to-read delay when accessing two different DIMMs)
429 * [13:12] Twrwr (Write to Write Timing)
430 * 00 = 1 bus clocks ( 0 idle cycle on the bus)
431 * 01 = 2 bus clocks ( 1 idle cycle on the bus)
432 * 10 = 3 bus clocks ( 2 idle cycles on the bus)
434 * [15:14] Trdrd ( Read to Read Timing)
435 * 00 = 2 bus clocks ( 1 idle cycle on the bus)
436 * 01 = 3 bus clocks ( 2 idle cycles on the bus)
437 * 10 = 4 bus clocks ( 3 idle cycles on the bus)
438 * 11 = 5 bus clocks ( 4 idle cycles on the bus)
439 * [17:16] Tref (Refresh Rate)
440 * 00 = Undefined behavior
442 * 10 = Refresh interval of 7.8 microseconds
443 * 11 = Refresh interval of 3.9 microseconds
445 * [22:20] Trfc0 ( Auto-Refresh Row Cycle Time for the Logical DIMM0,
446 * based on DRAM density and speed)
447 * 000 = 75 ns (all speeds, 256Mbit)
448 * 001 = 105 ns (all speeds, 512Mbit)
449 * 010 = 127.5 ns (all speeds, 1Gbit)
450 * 011 = 195 ns (all speeds, 2Gbit)
451 * 100 = 327.5 ns (all speeds, 4Gbit)
455 * [25:23] Trfc1 ( Auto-Refresh Row Cycle Time for the Logical DIMM1,
456 * based on DRAM density and speed)
457 * [28:26] Trfc2 ( Auto-Refresh Row Cycle Time for the Logical DIMM2,
458 * based on DRAM density and speed)
459 * [31:29] Trfc3 ( Auto-Refresh Row Cycle Time for the Logical DIMM3,
460 * based on DRAM density and speed)
462 PCI_ADDR(0, 0x18, 2, 0x8c), 0x000c008f, (2 << 16)|(1 << 8),
463 /* DRAM Config Low Register
465 * [ 0: 0] InitDram (Initialize DRAM)
466 * 1 = write 1 cause DRAM controller to execute the DRAM
467 * initialization, when done it read to 0
468 * [ 1: 1] ExitSelfRef ( Exit Self Refresh Command )
469 * 1 = write 1 causes the DRAM controller to bring the DRAMs out
470 * for self refresh mode
472 * [ 5: 4] DramTerm (DRAM Termination)
473 * 00 = On die termination disabled
478 * [ 7: 7] DramDrvWeak ( DRAM Drivers Weak Mode)
479 * 0 = Normal drive strength mode.
480 * 1 = Weak drive strength mode
481 * [ 8: 8] ParEn (Parity Enable)
482 * 1 = Enable address parity computation output, PAR,
483 * and enables the parity error input, ERR
484 * [ 9: 9] SelfRefRateEn (Faster Self Refresh Rate Enable)
485 * 1 = Enable high temperature ( two times normal )
487 * [10:10] BurstLength32 ( DRAM Burst Length Set for 32 Bytes)
490 * [11:11] Width128 ( Width of DRAM interface)
491 * 0 = the controller DRAM interface is 64-bits wide
492 * 1 = the controller DRAM interface is 128-bits wide
493 * [12:12] X4Dimm (DIMM 0 is x4)
494 * [13:13] X4Dimm (DIMM 1 is x4)
495 * [14:14] X4Dimm (DIMM 2 is x4)
496 * [15:15] X4Dimm (DIMM 3 is x4)
498 * 1 = x4 DIMM present
499 * [16:16] UnBuffDimm ( Unbuffered DIMMs)
501 * 1 = Unbuffered DIMMs
503 * [19:19] DimmEccEn ( DIMM ECC Enable )
504 * 1 = ECC checking is being enabled for all DIMMs on the DRAM
505 * controller ( Through F3 0x44[EccEn])
508 PCI_ADDR(0, 0x18, 2, 0x90), 0xfff6004c, 0x00000010,
509 /* DRAM Config High Register
511 * [ 0: 2] MemClkFreq ( Memory Clock Frequency)
517 * [ 3: 3] MemClkFreqVal (Memory Clock Frequency Valid)
518 * 1 = BIOS need to set the bit when setting up MemClkFreq to
520 * [ 7: 4] MaxAsyncLat ( Maximum Asynchronous Latency)
525 * [12:12] RDqsEn ( Read DQS Enable) This bit is only be set if x8
526 * registered DIMMs are present in the system
527 * 0 = DM pins function as data mask pins
528 * 1 = DM pins function as read DQS pins
530 * [14:14] DisDramInterface ( Disable the DRAM interface ) When this bit
531 * is set, the DRAM controller is disabled, and interface in low power
533 * 0 = Enabled (default)
535 * [15:15] PowerDownEn ( Power Down Mode Enable )
536 * 0 = Disabled (default)
538 * [16:16] PowerDown ( Power Down Mode )
539 * 0 = Channel CKE Control
540 * 1 = Chip Select CKE Control
541 * [17:17] FourRankSODimm (Four Rank SO-DIMM)
542 * 1 = this bit is set by BIOS to indicate that a four rank
544 * [18:18] FourRankRDimm (Four Rank Registered DIMM)
545 * 1 = this bit is set by BIOS to indicate that a four rank
546 * registered DIMM is present
548 * [20:20] SlowAccessMode (Slow Access Mode (2T Mode))
549 * 0 = DRAM address and control signals are driven for one
551 * 1 = One additional MEMCLK of setup time is provided on all
552 * DRAM address and control signals except CS, CKE, and ODT;
553 * i.e., these signals are driven for two MEMCLK cycles
556 * [22:22] BankSwizzleMode ( Bank Swizzle Mode),
557 * 0 = Disabled (default)
560 * [27:24] DcqBypassMax ( DRAM Controller Queue Bypass Maximum)
561 * 0000 = No bypass; the oldest request is never bypassed
562 * 0001 = The oldest request may be bypassed no more than 1 time
564 * 1111 = The oldest request may be bypassed no more than 15\
566 * [31:28] FourActWindow ( Four Bank Activate Window) , not more than
567 * 4 banks in a 8 bank device are activated
568 * 0000 = No tFAW window restriction
569 * 0001 = 8 MEMCLK cycles
570 * 0010 = 9 MEMCLK cycles
572 * 1101 = 20 MEMCLK cycles
575 PCI_ADDR(0, 0x18, 2, 0x94), 0x00a82f00,0x00008000,
576 /* DRAM Delay Line Register
578 * [ 0: 0] MemClrStatus (Memory Clear Status) : Readonly
579 * when set, this bit indicates that the memory clear function
580 * is complete. Only clear by reset. BIOS should not write or
581 * read the DRAM until this bit is set by hardware
582 * [ 1: 1] DisableJitter ( Disable Jitter)
583 * When set the DDR compensation circuit will not change the
584 * values unless the change is more than one step from the
586 * [ 3: 2] RdWrQByp ( Read/Write Queue Bypass Count)
591 * [ 4: 4] Mode64BitMux (Mismatched DIMM Support Enable)
592 * 1 When bit enables support for mismatched DIMMs when using
593 * 128-bit DRAM interface, the Width128 no effect, only for
595 * [ 5: 5] DCC_EN ( Dynamic Idle Cycle Counter Enable)
596 * When set to 1, indicates that each entry in the page tables
597 * dynamically adjusts the idle cycle limit based on page
598 * Conflict/Page Miss (PC/PM) traffic
599 * [ 8: 6] ILD_lmt ( Idle Cycle Limit)
608 * [ 9: 9] DramEnabled ( DRAM Enabled)
609 * When Set, this bit indicates that the DRAM is enabled, this
610 * bit is set by hardware after DRAM initialization or on an exit
611 * from self refresh. The DRAM controller is initialized after the
612 * hardware-controlled initialization process ( initiated by the
613 * F2 0x90[DramInit]) completes or when the BIOS-controlled
614 * initialization process completes (F2 0x7c(EnDramInit] is
615 * written from 1 to 0)
617 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel B,
618 * BIOS should set it to reduce the power consumption)
619 * Bit F(1207) M2 Package S1g1 Package
621 * 1 N/A MA0_CLK1 MA0_CLK1
624 * 4 MA1_CLK MA1_CLK0 N/A
625 * 5 MA0_CLK MA0_CLK0 MA0_CLK0
627 * 7 N/A MA0_CLK2 MA0_CLK2
629 PCI_ADDR(0, 0x18, 2, 0xa0), 0x00fffc00, 0xff000000,
631 /* DRAM Scrub Control Register
633 * [ 4: 0] DRAM Scrub Rate
635 * [12: 8] L2 Scrub Rate
637 * [20:16] Dcache Scrub
640 * 00000 = Do not scrub
662 * All Others = Reserved
664 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
665 /* DRAM Scrub Address Low Register
667 * [ 0: 0] DRAM Scrubber Redirect Enable
669 * 1 = Scrubber Corrects errors found in normal operation
671 * [31: 6] DRAM Scrub Address 31-6
673 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
674 /* DRAM Scrub Address High Register
676 * [ 7: 0] DRAM Scrub Address 39-32
679 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
681 /* for PCI_ADDR(0, 0x18, 2, 0x98) index,
682 and PCI_ADDR(0x, 0x18, 2, 0x9c) data */
685 [29: 0] DctOffset (Dram Controller Offset)
686 [30:30] DctAccessWrite (Dram Controller Read/Write Select)
689 [31:31] DctAccessDone (Dram Controller Access Done)
690 0 = Access in progress
691 1 = No access in progress
694 [31: 0] DctOffsetData (Dram Controller Offset Data)
697 - Write the register num to DctOffset with DctAccessWrite = 0
698 - poll the DctAccessDone until it = 1
699 - Read the data from DctOffsetData
701 - Write the data to DctOffsetData
702 - Write register num to DctOffset with DctAccessWrite = 1
703 - poll the DctAccessDone until it = 1
/* Skip nodes whose controller does not respond, and remember the
 * presence in sysinfo for later stages. */
709 if (!controller_present(ctrl)) {
710 sysinfo->ctrl_present[ctrl->node_id] = 0;
713 sysinfo->ctrl_present[ctrl->node_id] = 1;
715 print_spew("setting up CPU");
716 print_spew_hex8(ctrl->node_id);
717 print_spew(" northbridge registers\r\n");
718 max = sizeof(register_values)/sizeof(register_values[0]);
719 for (i = 0; i < max; i += 3) {
/* Rebase the table's node-0 device onto this controller's node. */
723 dev = (register_values[i] & ~0xff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
724 where = register_values[i] & 0xff;
725 reg = pci_read_config32(dev, where);
726 reg &= register_values[i+1];
727 reg |= register_values[i+2];
728 pci_write_config32(dev, where, reg);
731 print_spew("done.\r\n");
735 static int is_dual_channel(const struct mem_controller *ctrl)
738 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
739 return dcl & DCL_Width128;
743 static int is_opteron(const struct mem_controller *ctrl)
745 /* Test to see if I am an Opteron.
746 * FIXME Testing dual channel capability is correct for now
747 * but a better test is probably required.
748 * m2 and s1g1 support dual channel too. but only support unbuffered dimm
750 #warning "FIXME implement a better test for opterons"
752 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
753 return !!(nbcap & NBCAP_128Bit);
757 static int is_registered(const struct mem_controller *ctrl)
759 /* Test to see if we are dealing with registered SDRAM.
760 * If we are not registered we are unbuffered.
761 * This function must be called after spd_handle_unbuffered_dimms.
764 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
765 return !(dcl & DCL_UnBuffDimm);
/* Read a DIMM's SPD and fill in *sz: rows, columns, log2(banks) and
 * per_rank = log2 of the rank size in BITS.  On any SPD read failure
 * the function bails out via hw_err (label elided in this chunk);
 * implausible SPD values go to val_err, which die()s. */
769 static void spd_get_dimm_size(unsigned device, struct dimm_size *sz)
771 /* Calculate the log base 2 size of a DIMM in bits */
778 value = spd_read_byte(device, SPD_ROW_NUM); /* rows */
779 if (value < 0) goto hw_err;
780 if ((value & 0xff) == 0) goto val_err; /* max is 16 ? */
781 sz->per_rank += value & 0xff;
782 sz->rows = value & 0xff;
784 value = spd_read_byte(device, SPD_COL_NUM); /* columns */
785 if (value < 0) goto hw_err;
786 if ((value & 0xff) == 0) goto val_err; /* max is 11 */
787 sz->per_rank += value & 0xff;
788 sz->col = value & 0xff;
790 value = spd_read_byte(device, SPD_BANK_NUM); /* banks */
791 if (value < 0) goto hw_err;
792 if ((value & 0xff) == 0) goto val_err;
793 sz->bank = log2(value & 0xff); // convert 4 to 2, and 8 to 3
794 sz->per_rank += sz->bank;
796 /* Get the module data width and convert it to a power of two */
797 value = spd_read_byte(device, SPD_DATA_WIDTH);
798 if (value < 0) goto hw_err;
/* Only 64-bit (non-ECC) and 72-bit (ECC) modules are acceptable. */
800 if ((value != 72) && (value != 64)) goto val_err;
801 sz->per_rank += log2(value) - 3; //64 bit So another 3 lines
803 /* How many ranks? */
804 /* number of physical banks */
805 value = spd_read_byte(device, SPD_MOD_ATTRIB_RANK);
806 if (value < 0) goto hw_err;
807 /* value >>= SPD_MOD_ATTRIB_RANK_NUM_SHIFT; */
808 value &= SPD_MOD_ATTRIB_RANK_NUM_MASK;
809 value += SPD_MOD_ATTRIB_RANK_NUM_BASE; // 0-->1, 1-->2, 3-->4
811 rank == 1 only one rank or say one side
812 rank == 2 two side , and two ranks
813 rank == 4 two side , and four ranks total
814 Some one side two ranks, because of stacked
816 if ((value != 1) && (value != 2) && (value != 4 )) {
821 /* verify if per_rank is equal byte 31
822 it has the DIMM size as a multiple of 128MB.
824 value = spd_read_byte(device, SPD_RANK_SIZE);
825 if (value < 0) goto hw_err;
/* SPD byte 31 wraps: encodings <= 4 denote sizes above 1GB. */
828 if (value <=4 ) value += 8; // add back to 1G to high
829 value += (27-5); // make 128MB to the real lines
830 if ( value != (sz->per_rank)) {
831 print_err("Bad RANK Size --\r\n");
838 die("Bad SPD value\r\n");
839 /* If an hw_error occurs report that I have no memory */
/* Program the chip-select base registers for DIMM `index` from the
 * sizes gathered in *sz, then un-gate the DIMM's memory clocks by
 * clearing the socket-specific MemClkDis bits (channel B as well when
 * the interface is 128 bits wide). */
851 static void set_dimm_size(const struct mem_controller *ctrl,
852 struct dimm_size *sz, unsigned index, int is_Width128)
854 uint32_t base0, base1;
856 /* For each base register.
857 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
858 * The initialize dimm size is in bits.
859 * Set the base enable bit0.
864 /* Make certain side1 of the dimm is at least 128MB */
865 if (sz->per_rank >= 27) {
866 base0 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
869 /* Make certain side2 of the dimm is at least 128MB */
870 if (sz->rank > 1) { // 2 ranks or 4 ranks
871 base1 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
874 /* Double the size if we are using dual channel memory */
876 base0 = (base0 << 1) | (base0 & 1);
877 base1 = (base1 << 1) | (base1 & 1);
880 /* Clear the reserved bits */
881 base0 &= ~0xe007fffe;
882 base1 &= ~0xe007fffe;
884 /* Set the appropriate DIMM base address register */
885 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
886 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
887 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank DIMMs additionally occupy chip selects index+4/index+5. */
889 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
890 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
894 /* Enable the memory clocks for this DIMM by Clear the MemClkDis bit*/
898 #if CPU_SOCKET_TYPE == 0x10 /* L1 */
899 ClkDis0 = DTL_MemClkDis0;
900 #elif CPU_SOCKET_TYPE == 0x11 /* AM2 */
901 ClkDis0 = DTL_MemClkDis0_AM2;
902 #elif CPU_SOCKET_TYPE == 0x12 /* S1G1 */
903 ClkDis0 = DTL_MemClkDis0_S1g1;
906 dword = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW); //Channel A
907 dword &= ~(ClkDis0 >> index);
908 #if QRANK_DIMM_SUPPORT == 1
910 dword &= ~(ClkDis0 >> (index+2));
913 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dword);
915 if (is_Width128) { //Channel B
916 dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
917 dword &= ~(ClkDis0 >> index);
918 #if QRANK_DIMM_SUPPORT == 1
920 dword &= ~(ClkDis0 >> (index+2));
923 pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dword);
929 /* row col bank for 64 bit
/* Program the 4-bit CS-mode nibble in the DRAM Bank Address Mapping
 * register for DIMM `index`, looked up from (bank, rows, cols) via
 * the cs_map_aaa table (table contents elided in this chunk). */
945 static void set_dimm_cs_map(const struct mem_controller *ctrl,
946 struct dimm_size *sz, unsigned index)
948 static const uint8_t cs_map_aaa[24] = {
949 /* (bank=2, row=13, col=9)(3, 16, 11) ---> (0, 0, 0) (1, 3, 2) */
964 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
965 map &= ~(0xf << (index * 4));
966 #if QRANK_DIMM_SUPPORT == 1
968 map &= ~(0xf << ( (index + 2) * 4));
972 /* Make certain side1 of the dimm is at least 128MB */
973 if (sz->per_rank >= 27) {
/* Index the table by (banks-2, rows-13, cols-9). */
975 temp_map = cs_map_aaa[(sz->bank-2)*3*4 + (sz->rows - 13)*3 + (sz->col - 9) ];
976 map |= temp_map << (index*4);
977 #if QRANK_DIMM_SUPPORT == 1
979 map |= temp_map << ( (index + 2) * 4);
984 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/* For every populated DIMM socket: read its size from SPD and program
 * chip-select base/mask and CS-map registers.  Returns -1 on SPD
 * error; the normal-path return (dimm_mask) is elided in this chunk. */
989 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask,
990 struct mem_info *meminfo)
994 for (i = 0; i < DIMM_SOCKETS; i++) {
995 struct dimm_size *sz = &(meminfo->sz[i]);
996 if (!(dimm_mask & (1 << i))) {
999 spd_get_dimm_size(ctrl->channel0[i], sz);
1000 if (sz->per_rank == 0) {
1001 return -1; /* Report SPD error */
1003 set_dimm_size(ctrl, sz, i, meminfo->is_Width128);
1004 set_dimm_cs_map (ctrl, sz, i);
/* Program the DRAM base/limit routing pair for this node into the
 * address-map registers (F1 0x40/0x44 + node*8) of every node, so all
 * CPUs route [base_k, limit_k) to this controller.  base_k/limit_k
 * are in KB; registers hold address bits 39-24. */
1010 static void route_dram_accesses(const struct mem_controller *ctrl,
1011 unsigned long base_k, unsigned long limit_k)
1013 /* Route the addresses to the controller node */
1018 unsigned limit_reg, base_reg;
1021 node_id = ctrl->node_id;
1022 index = (node_id << 3);
/* KB -> register format: <<2 then keep bits 31:16; limit is inclusive,
 * hence the -0x10000, and the destination node goes in bits 2:0. */
1023 limit = (limit_k << 2);
1024 limit &= 0xffff0000;
1025 limit -= 0x00010000;
1026 limit |= ( 0 << 8) | (node_id << 0);
1027 base = (base_k << 2);
1029 base |= (0 << 8) | (1<<1) | (1<<0);   /* read + write enable */
1031 limit_reg = 0x44 + index;
1032 base_reg = 0x40 + index;
/* Broadcast the pair to function 1 of every node device. */
1033 for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1);
1034 device += PCI_DEV(0, 1, 0)) {
1035 pci_write_config32(device, limit_reg, limit);
1036 pci_write_config32(device, base_reg, base);
/* Program the TOP_MEM (and, above 4GB, TOP_MEM2) MSRs from the total
 * memory size tom_k (in KB), keeping a hole below 4GB for MMIO/ROM;
 * hole_startk, when nonzero, overrides where that hole begins. */
1041 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
1043 /* Error if I don't have memory */
1048 /* Report the amount of memory. */
1049 print_debug("RAM: 0x");
1050 print_debug_hex32(tom_k);
1051 print_debug(" KB\r\n");
1054 if (tom_k > (4*1024*1024)) {
1055 /* Now set top of memory */
1056 msr.lo = (tom_k & 0x003fffff) << 10;
1057 msr.hi = (tom_k & 0xffc00000) >> 22;
1058 wrmsr(TOP_MEM2, msr);
1061 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
1062 * so I can see my rom chip and other I/O devices.
1064 if (tom_k >= 0x003f0000) {
1065 #if HW_MEM_HOLE_SIZEK != 0
1066 if (hole_startk != 0) {
1067 tom_k = hole_startk;
/* KB -> bytes across the MSR's lo/hi halves. */
1072 msr.lo = (tom_k & 0x003fffff) << 10;
1073 msr.hi = (tom_k & 0xffc00000) >> 22;
1074 wrmsr(TOP_MEM, msr);
/* Try to interleave the enabled chip selects: possible only when all
 * enabled chip selects share the same size and cs_mode and their count
 * is a power of two (2..8).  On success program interleaved base/mask
 * registers and return the total size in KB; failure paths (return 0)
 * are elided in this chunk. */
1077 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl, int is_Width128)
/* Per-cs_mode shift selecting which address bit the interleave uses. */
1081 static const uint8_t csbase_low_f0_shift[] = {
1082 /* 128MB */ (14 - (13-5)),
1083 /* 256MB */ (15 - (13-5)),
1084 /* 512MB */ (15 - (13-5)),
1085 /* 512MB */ (16 - (13-5)),
1086 /* 512MB */ (16 - (13-5)),
1087 /* 1GB */ (16 - (13-5)),
1088 /* 1GB */ (16 - (13-5)),
1089 /* 2GB */ (16 - (13-5)),
1090 /* 2GB */ (17 - (13-5)),
1091 /* 4GB */ (17 - (13-5)),
1092 /* 4GB */ (16 - (13-5)),
1093 /* 8GB */ (17 - (13-5)),
1096 /* cs_base_high is not changed */
1098 uint32_t csbase_inc;
1099 int chip_selects, index;
1101 unsigned common_size;
1102 unsigned common_cs_mode;
1103 uint32_t csbase, csmask;
1105 /* See if all of the memory chip selects are the same size
1106 * and if so count them.
1110 common_cs_mode = 0xff;     /* sentinel: no mode seen yet */
1111 for (index = 0; index < 8; index++) {
1116 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1118 /* Is it enabled? */
1123 size = (value >> 19) & 0x3ff;
1124 if (common_size == 0) {
1127 /* The size differed fail */
1128 if (common_size != size) {
1132 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
1133 cs_mode =( value >> ((index>>1)*4)) & 0xf;
1134 if (common_cs_mode == 0xff) {
1135 common_cs_mode = cs_mode;
1137 /* The cs_mode differed fail */
1138 if (common_cs_mode != cs_mode) {
1143 /* Chip selects can only be interleaved when there is
1144 * more than one and their is a power of two of them.
1146 bits = log2(chip_selects);
1147 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
1148 //chip_selects max = 8
1152 /* Find the bits of csbase that we need to interleave on */
1153 csbase_inc = 1 << (csbase_low_f0_shift[common_cs_mode]);
1159 /* Compute the initial values for csbase and csbask.
1160 * In csbase just set the enable bit and the base to zero.
1161 * In csmask set the mask bits for the size and page level interleave.
1164 csmask = (((common_size << bits) - 1) << 19);
1165 csmask |= 0x3fe0 & ~((csbase_inc << bits) - csbase_inc);
1166 for (index = 0; index < 8; index++) {
1169 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1170 /* Is it enabled? */
1174 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1175 if ((index & 1) == 0) { //only have 4 CSMASK
1176 pci_write_config32(ctrl->f2, DRAM_CSMASK + ((index>>1) << 2), csmask);
1178 csbase += csbase_inc;
1181 print_debug("Interleaved\r\n");
1183 /* Return the memory size in K */
1184 return common_size << ((27-10) + bits);
/* Non-interleaved fallback: stack the enabled chip selects largest
 * first into a contiguous address range, rewriting each base/mask pair.
 * Returns the total memory size in KB. */
1186 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1190 /* Remember which registers we have used in the high 8 bits of tom */
1193 /* Find the largest remaining canidate */
1194 unsigned index, canidate;
1195 uint32_t csbase, csmask;
1199 for (index = 0; index < 8; index++) {
1201 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1203 /* Is it enabled? */
1208 /* Is it greater? */
1209 if (value <= csbase) {
1213 /* Has it already been selected */
1214 if (tom & (1 << (index + 24))) {
1217 /* I have a new canidate */
1222 /* See if I have found a new canidate */
1227 /* Remember the dimm size */
1228 size = csbase >> 19;
1230 /* Remember I have used this register */
1231 tom |= (1 << (canidate + 24));
1233 /* Recompute the cs base register value */
1234 csbase = (tom << 19) | 1;   /* new base + enable bit */
1236 /* Increment the top of memory */
1239 /* Compute the memory mask */
1240 csmask = ((size -1) << 19);
1241 csmask |= 0x3fe0; /* For now don't optimize */
1243 /* Write the new base register */
1244 pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1245 /* Write the new mask register */
1246 if ((canidate & 1) == 0) { //only have 4 CSMASK
1247 pci_write_config32(ctrl->f2, DRAM_CSMASK + ((canidate>>1) << 2), csmask);
1251 /* Return the memory size in K */
1252 return (tom & ~0xff000000) << (27-10);
/* Scan the F1 DRAM base/limit routing registers of nodes
 * [0, max_node_id) and return the highest mapped address in KB
 * (the final return statement is elided in this chunk). */
1255 unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1259 /* Find the last memory address used */
1261 for (node_id = 0; node_id < max_node_id; node_id++) {
1262 uint32_t limit, base;
1264 index = node_id << 3;
1265 base = pci_read_config32(ctrl->f1, 0x40 + index);
1266 /* Only look at the limit if the base is enabled */
1267 if ((base & 3) == 3) {   /* both read and write enable set */
1268 limit = pci_read_config32(ctrl->f1, 0x44 + index);
1269 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
/* Lay out this node's chip selects (interleaved when the CMOS option
 * allows and the configuration supports it, stacked otherwise), then
 * route the resulting range to this node and raise top-of-memory. */
1276 static void order_dimms(const struct mem_controller *ctrl,
1277 struct mem_info *meminfo)
1279 unsigned long tom_k, base_k;
1281 if (read_option(CMOS_VSTART_interleave_chip_selects,
1282 CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1283 tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128);
1285 print_debug("Interleaving disabled\r\n");
1290 tom_k = order_chip_selects(ctrl);
1293 /* Compute the memory base address */
1294 base_k = memory_end_k(ctrl, ctrl->node_id);
1296 route_dram_accesses(ctrl, base_k, tom_k);
1297 set_top_mem(tom_k, 0);
/* Disable DIMM `index` by zeroing its chip-select base registers
 * (including the quad-rank pair when applicable) and clearing its bit
 * in dimm_mask; the updated mask is the return value (return statement
 * elided in this chunk). */
1301 static long disable_dimm(const struct mem_controller *ctrl, unsigned index,
1302 long dimm_mask, struct mem_info *meminfo)
1304 print_debug("disabling dimm");
1305 print_debug_hex8(index);
1306 print_debug("\r\n");
1307 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1308 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1309 #if QRANK_DIMM_SUPPORT == 1
1310 if (meminfo->sz[index].rank == 4) {
1311 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), 0);
1312 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), 0);
1316 dimm_mask &= ~(1 << index);
/* Classify the installed DIMMs as registered or unbuffered via SPD byte
 * SPD_DIMM_TYPE, reject (or on Opteron, disable) mixed/unsupported
 * populations, and program DCL_UnBuffDimm accordingly.  Records the result
 * in meminfo->is_registered and returns the (possibly reduced) dimm_mask.
 * NOTE(review): `registered` is presumably zeroed in an elided line before
 * the loop — confirm against the full source. */
1321 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
1322 long dimm_mask, struct mem_info *meminfo)
1325 uint32_t registered;
1328 for (i = 0; (i < DIMM_SOCKETS); i++) {
1330 if (!(dimm_mask & (1 << i))) {
1334 value = spd_read_byte(ctrl->channel0[i], SPD_DIMM_TYPE);
1339 /* Registered dimm ? */
1341 if ((value == SPD_DIMM_TYPE_RDIMM) ||
1342 (value == SPD_DIMM_TYPE_mRDIMM)) {
1343 /* check SPD_MOD_ATTRIB to verify it is
1344 SPD_MOD_ATTRIB_REGADC (0x11)? */
1345 registered |= (1<<i);
1349 if (is_opteron(ctrl)) {
1351 if ( registered != (dimm_mask & ((1<<DIMM_SOCKETS)-1)) ) {
1352 dimm_mask &= (registered | (registered << DIMM_SOCKETS) ); //disable unbuffered dimm
1353 // die("Mixed buffered and registered dimms not supported");
1355 //By yhlu for debug M2, s1g1 can do dual channel, but it uses unbuffered DIMM
1357 die("Unbuffered Dimms not supported on Opteron");
1363 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1364 dcl &= ~DCL_UnBuffDimm;
1365 meminfo->is_registered = 1;
1367 dcl |= DCL_UnBuffDimm;
1368 meminfo->is_registered = 0;
1370 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1373 if (meminfo->is_registered) {
1374 print_debug("Registered\r\n");
1376 print_debug("Unbuffered\r\n");
/* Probe every DIMM socket on both channels via SPD byte SPD_MEM_TYPE and
 * build a presence bitmask: bit i for channel 0 socket i, bit
 * (i + DIMM_SOCKETS) for channel 1 socket i.  Only DDR2 SDRAM modules
 * are accepted. */
1383 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1388 for (i = 0; i < DIMM_SOCKETS; i++) {
1391 device = ctrl->channel0[i];
1393 byte = spd_read_byte(ctrl->channel0[i], SPD_MEM_TYPE); /* Type */
1394 if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
1395 dimm_mask |= (1 << i);
1398 device = ctrl->channel1[i];
1400 byte = spd_read_byte(ctrl->channel1[i], SPD_MEM_TYPE);
1401 if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
1402 dimm_mask |= (1 << (i + DIMM_SOCKETS));
/* Decide whether the controller can run 128-bit (dual channel) mode.
 * Requirements: every populated channel-0 socket has a matching channel-1
 * DIMM (mask halves equal), the northbridge reports NBCAP_128Bit, and for
 * each pair every SPD byte in addresses[] reads identically on both DIMMs.
 * On success sets DCL_Width128 and meminfo->is_Width128 = 1; otherwise
 * falls back to single channel, masking off the channel-1 DIMM bits.
 * Returns the (possibly reduced) dimm_mask.
 *
 * BUGFIX: the verification table listed SPD byte 41 twice; the first of
 * the two entries is documented as "Extension of Byte 41 tRC and Byte 42
 * tRFC", which is DDR2 SPD byte 40 — so byte 40 was never compared.
 * Changed the duplicate 41 to 40.
 * NOTE(review): chunk is line-sampled; declarations (i, j, nbcap, dcl,
 * value0/value1) and the single_channel label are in elided lines. */
1410 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
1414 /* SPD addresses to verify are identical */
1415 static const uint8_t addresses[] = {
1416 2, /* Type should be DDR2 SDRAM */
1417 3, /* *Row addresses */
1418 4, /* *Column addresses */
1419 5, /* *Number of DIMM Ranks */
1420 6, /* *Module Data Width*/
1421 9, /* *Cycle time at highest CAS Latency CL=X */
1422 11, /* *DIMM Conf Type */
1423 13, /* *Pri SDRAM Width */
1424 17, /* *Logical Banks */
1425 18, /* *Supported CAS Latencies */
1426 20, /* *DIMM Type Info */
1427 21, /* *SDRAM Module Attributes */
1428 23, /* *Cycle time at CAS Latency (CLX - 1) */
1429 26, /* *Cycle time at CAS Latency (CLX - 2) */
1430 27, /* *tRP Row precharge time */
1431 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1432 29, /* *tRCD RAS to CAS */
1433 30, /* *tRAS Activate to Precharge */
1434 36, /* *Write recovery time (tWR) */
1435 37, /* *Internal write to read command delay (tRDP) */
1436 38, /* *Internal read to precharge command delay (tRTP) */
1437 40, /* *Extension of Byte 41 tRC and Byte 42 tRFC */
1438 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1439 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1441 /* If the dimms are not in pairs do not do dual channels */
1442 if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1443 ((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1444 goto single_channel;
1446 /* If the cpu is not capable of doing dual channels
1447 don't do dual channels */
1448 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1449 if (!(nbcap & NBCAP_128Bit)) {
1450 goto single_channel;
1452 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1453 unsigned device0, device1;
1456 /* If I don't have a dimm skip this one */
1457 if (!(dimm_mask & (1 << i))) {
1460 device0 = ctrl->channel0[i];
1461 device1 = ctrl->channel1[i];
1462 for (j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
1464 addr = addresses[j];
1465 value0 = spd_read_byte(device0, addr);
1469 value1 = spd_read_byte(device1, addr);
1473 if (value0 != value1) {
1474 goto single_channel;
1478 print_spew("Enabling dual channel memory\r\n");
1480 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1481 dcl &= ~DCL_BurstLength32; /* 32byte mode may be preferred in platforms that include graphics controllers that generate a lot of 32-bytes system memory accesses
1482 32byte mode is not supported when the DRAM interface is 128 bits wide, even if 32byte mode is set, system still uses 64 byte mode */
1483 dcl |= DCL_Width128;
1484 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1485 meminfo->is_Width128 = 1;
1488 dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
1489 meminfo->is_Width128 = 0;
/* (struct mem_param fields — the struct header is in an elided line.)
 * One entry per supported memory speed; see speed[] below. */
1494 uint16_t cycle_time;
1495 uint8_t divisor; /* In 1/40 ns increments */
1500 uint8_t DcqByPassMax;
1501 uint32_t dch_memclk;
/* Table of supported memory speeds, fastest last, terminated by a
 * cycle_time of 0.  cycle_time is BCD-ish ns*100 (0x500 = 5.00ns = 200MHz);
 * divisor is the clock period in 1/40 ns units; dch_memclk is the
 * DCH_MemClkFreq encoding for DRAM Config High. */
1505 static const struct mem_param speed[] = {
1507 .name = "200Mhz\r\n",
1508 .cycle_time = 0x500,
1509 .divisor = 200, // how many 1/40ns per clock
1510 .dch_memclk = DCH_MemClkFreq_200MHz, //0
1519 .name = "266Mhz\r\n",
1520 .cycle_time = 0x375,
1521 .divisor = 150, //????
1522 .dch_memclk = DCH_MemClkFreq_266MHz, //1
1530 .name = "333Mhz\r\n",
1531 .cycle_time = 0x300,
1533 .dch_memclk = DCH_MemClkFreq_333MHz, //2
1542 .name = "400Mhz\r\n",
1543 .cycle_time = 0x250,
1545 .dch_memclk = DCH_MemClkFreq_400MHz,//3
/* terminator entry */
1553 .cycle_time = 0x000,
/* Pick the fastest speed[] entry whose successor is still too fast for
 * min_cycle_time (i.e. the slowest-clock entry that satisfies it), print
 * its name, and return it.  Dies if even the slowest table entry cannot
 * meet min_cycle_time.
 * BUGFIX: corrected the die() message typo "to low" -> "too low". */
1557 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1560 const struct mem_param *param;
1561 for (param = &speed[0]; param->cycle_time ; param++) {
1562 if (min_cycle_time > (param+1)->cycle_time) {
1566 if (!param->cycle_time) {
1567 die("min_cycle_time too low");
1569 print_spew(param->name);
1570 #ifdef DRAM_MIN_CYCLE_TIME
1571 print_debug(param->name);
/* Refine the nominal memory-clock divisor using the CPU's current FID
 * (read from MSR 0xc0010042) so async timing calculations use the real
 * CPU-clock/mem-clock ratio.  i selects the column (200/266/333/400MHz),
 * the FID-derived index selects the row; falls back to the nominal
 * divisor when index or i is out of table range.
 * NOTE(review): computation of `index` from fid_cur is in elided lines. */
1576 static uint8_t get_exact_divisor(int i, uint8_t divisor)
1578 //input divisor could be 200(200), 150(266), 120(333), 100 (400)
1579 static const uint8_t dv_a[] = {
1580 /* 200 266 333 400 */
1581 /*4 */ 250, 250, 250, 250,
1582 /*5 */ 200, 200, 200, 100,
1583 /*6 */ 200, 166, 166, 100,
1584 /*7 */ 200, 171, 142, 100,
1586 /*8 */ 200, 150, 125, 100,
1587 /*9 */ 200, 156, 133, 100,
1588 /*10*/ 200, 160, 120, 100,
1589 /*11*/ 200, 163, 127, 100,
1591 /*12*/ 200, 150, 133, 100,
1592 /*13*/ 200, 153, 123, 100,
1593 /*14*/ 200, 157, 128, 100,
1594 /*15*/ 200, 160, 120, 100,
1601 msr = rdmsr(0xc0010042);
1602 fid_cur = msr.lo & 0x3f;
1606 if (index>12) return divisor;
1608 if (i>3) return divisor;
1610 return dv_a[index * 4+i];
/* Result bundle for spd_set_memclk(): the chosen speed entry (NULL on
 * hard error) plus the possibly-reduced dimm_mask (-1 on hard error). */
1615 struct spd_set_memclk_result {
1616 const struct mem_param *param;
/* Convert an SPD BCD-style cycle-time byte (high nibble = ns, low nibble
 * = tenths, or 10..13 encoding .25/.33/.66/.75) into a linear hundredths
 * value comparable with the cycle_time fields above. */
1621 static unsigned convert_to_linear(unsigned value)
1623 static const unsigned fraction[] = { 0x25, 0x33, 0x66, 0x75 };
1626 /* We need to convert value to more readable */
1627 if ((value & 0xf) < 10) { //no .25, .33, .66, .75
1630 valuex = ((value & 0xf0) << 4) | fraction [(value & 0xf)-10];
/* Select the memory clock and CAS latency.
 * Pass 1: clamp to the northbridge capability and the CMOS max-clock
 * option, then for every DIMM scan SPD bytes 9/23/25 (cycle time at
 * CL, CL-1, CL-2) to find the fastest clock / lowest latency all DIMMs
 * support.  Pass 2: disable any DIMM that cannot run the chosen
 * memclk+CL.  Finally programs DCH MemClkFreq and DTL Tcl, and returns
 * {param, dimm_mask} (param NULL / mask -1 on hw_error).
 * NOTE(review): chunk is line-sampled; several declarations, else
 * branches and the hw_error label are in elided lines. */
1636 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
1638 /* Compute the minimum cycle time for these dimms */
1639 struct spd_set_memclk_result result;
1640 unsigned min_cycle_time, min_latency, bios_cycle_time;
1644 static const uint8_t latency_indicies[] = { 25, 23, 9 };
1646 static const uint16_t min_cycle_times[] = { // use full speed to compare
1647 [NBCAP_MEMCLK_NOLIMIT] = 0x250, /* 2.5ns */
1648 [NBCAP_MEMCLK_333MHZ] = 0x300, /* 3.0ns */
1649 [NBCAP_MEMCLK_266MHZ] = 0x375, /* 3.75ns */
1650 [NBCAP_MEMCLK_200MHZ] = 0x500, /* 5.0ns */
1654 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1655 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1656 bios_cycle_time = min_cycle_times[
1657 read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
1658 if (bios_cycle_time > min_cycle_time) {
1659 min_cycle_time = bios_cycle_time;
1663 print_tx("1 min_cycle_time:", min_cycle_time);
1665 /* Compute the least latency with the fastest clock supported
1666 * by both the memory controller and the dimms.
1668 for (i = 0; i < DIMM_SOCKETS; i++) {
1669 int new_cycle_time, new_latency;
1674 if (!(dimm_mask & (1 << i))) {
1678 /* First find the supported CAS latencies
1679 * Byte 18 for DDR SDRAM is interpreted:
1680 * bit 3 == CAS Latency = 3
1681 * bit 4 == CAS Latency = 4
1682 * bit 5 == CAS Latency = 5
1683 * bit 6 == CAS Latency = 6
1685 new_cycle_time = 0x500;
1688 latencies = spd_read_byte(ctrl->channel0[i], SPD_CAS_LAT);
1689 if (latencies <= 0) continue;
1692 print_tx("\tlatencies:", latencies);
1693 /* Compute the lowest cas latency supported */
1694 latency = log2(latencies) - 2;
1696 /* Loop through and find a fast clock with a low latency */
1697 for (index = 0; index < 3; index++, latency++) {
1699 if ((latency < 3) || (latency > 6) ||
1700 (!(latencies & (1 << latency)))) {
1703 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1708 print_tx("\tindex:", index);
1709 print_tx("\t\tlatency:", latency);
1710 print_tx("\t\tvalue1:", value);
1712 value = convert_to_linear(value);
1714 print_tx("\t\tvalue2:", value);
1716 /* Only increase the latency if we decrease the clock */
1717 if (value >= min_cycle_time ) {
1718 if (value < new_cycle_time) {
1719 new_cycle_time = value;
1720 new_latency = latency;
1721 } else if (value == new_cycle_time) {
1722 if (new_latency > latency) {
1723 new_latency = latency;
1727 print_tx("\t\tnew_cycle_time:", new_cycle_time);
1728 print_tx("\t\tnew_latency:", new_latency);
1732 if (new_latency > 6){
1736 /* Does min_latency need to be increased? */
1737 if (new_cycle_time > min_cycle_time) {
1738 min_cycle_time = new_cycle_time;
1741 /* Does min_cycle_time need to be increased? */
1742 if (new_latency > min_latency) {
1743 min_latency = new_latency;
1746 print_tx("2 min_cycle_time:", min_cycle_time);
1747 print_tx("2 min_latency:", min_latency);
1749 /* Make a second pass through the dimms and disable
1750 * any that cannot support the selected memclk and cas latency.
1753 print_tx("3 min_cycle_time:", min_cycle_time);
1754 print_tx("3 min_latency:", min_latency);
1756 for (i = 0; (i < DIMM_SOCKETS) && (ctrl->channel0[i]); i++) {
1761 if (!(dimm_mask & (1 << i))) {
1765 latencies = spd_read_byte(ctrl->channel0[i], SPD_CAS_LAT);
1766 if (latencies < 0) goto hw_error;
1767 if (latencies == 0) {
1771 /* Compute the lowest cas latency supported */
1772 latency = log2(latencies) -2;
1774 /* Walk through searching for the selected latency */
1775 for (index = 0; index < 3; index++, latency++) {
1776 if (!(latencies & (1 << latency))) {
1779 if (latency == min_latency)
1782 /* If I can't find the latency or my index is bad error */
1783 if ((latency != min_latency) || (index >= 3)) {
1787 /* Read the min_cycle_time for this latency */
1788 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1789 if (value < 0) goto hw_error;
1791 value = convert_to_linear(value);
1792 /* All is good if the selected clock speed
1793 * is what I need or slower.
1795 if (value <= min_cycle_time) {
1798 /* Otherwise I have an error, disable the dimm */
1800 dimm_mask = disable_dimm(ctrl, i, dimm_mask, meminfo);
1803 print_tx("4 min_cycle_time:", min_cycle_time);
1805 /* Now that I know the minimum cycle time lookup the memory parameters */
1806 result.param = get_mem_param(min_cycle_time);
1808 /* Update DRAM Config High with our selected memory speed */
1809 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1810 value &= ~(DCH_MemClkFreq_MASK << DCH_MemClkFreq_SHIFT);
1812 value |= result.param->dch_memclk << DCH_MemClkFreq_SHIFT;
1813 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1815 print_debug(result.param->name);
1817 /* Update DRAM Timing Low with our selected cas latency */
1818 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1819 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1820 value |= (min_latency - DTL_TCL_BASE) << DTL_TCL_SHIFT;
1821 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1823 result.dimm_mask = dimm_mask;
/* hw_error path: signal failure to the caller */
1826 result.param = (const struct mem_param *)0;
1827 result.dimm_mask = -1;
/* Map an SPD tRC-extension nibble (0..6) to extra quarter-ns units via a
 * small lookup table. */
1831 static unsigned convert_to_1_4(unsigned value)
1833 static const uint8_t fraction[] = { 0, 1, 2, 2, 3, 3, 0 };
1836 /* We need to convert value to more readable */
1837 valuex = fraction [value & 0x7];
/* Program tRC (DRAM Timing Low) for DIMM i.  Reads SPD byte SPD_TRC plus
 * the fractional extension from SPD_TRC-1, converts ns to clocks with the
 * selected divisor, clamps to [DTL_TRC_MIN, DTL_TRC_MAX], and only widens
 * the register if the new value exceeds what is already programmed.
 * Returns -1 on SPD read error; positive on success (elided lines). */
1840 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1842 unsigned clocks, old_clocks;
1846 value = spd_read_byte(ctrl->channel0[i], SPD_TRC);
1847 if (value < 0) return -1;
1849 value2 = spd_read_byte(ctrl->channel0[i], SPD_TRC -1);
1851 value += convert_to_1_4(value2>>4);
1855 clocks = (value + param->divisor - 1)/param->divisor;
1857 if (clocks < DTL_TRC_MIN) {
1858 clocks = DTL_TRC_MIN;
1860 if (clocks > DTL_TRC_MAX) {
1864 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1865 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1866 if (old_clocks >= clocks) { //?? someone did it
1867 // clocks = old_clocks;
1870 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1871 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1872 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program the per-DIMM tRFC field (DRAM Timing High, 3 bits per DIMM at
 * DTH_TRFC0_SHIFT + 3*i).  The encoding is derived from the logical rank
 * size (meminfo->sz[i].per_rank) and the primary SDRAM width read from
 * SPD_PRI_WIDTH.  Only widens an already-programmed value. */
1876 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i, struct mem_info *meminfo)
1878 unsigned clocks, old_clocks;
1882 //get the cs_size --> logic dimm size
1883 value = spd_read_byte(ctrl->channel0[i], SPD_PRI_WIDTH);
1888 value = 6 - log2(value); //4-->4, 8-->3, 16-->2
1890 clocks = meminfo->sz[i].per_rank - 27 + 2 - value;
1892 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1894 old_clocks = ((dth >> (DTH_TRFC0_SHIFT+i*3)) & DTH_TRFC_MASK);
1895 if (old_clocks >= clocks) { // some one did it?
1898 dth &= ~(DTH_TRFC_MASK << (DTH_TRFC0_SHIFT+i*3));
1899 dth |= clocks << (DTH_TRFC0_SHIFT+i*3);
1900 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Generic helper: read an SPD timing byte (already in 1/4 ns units),
 * convert to clocks with the selected divisor, clamp to [TT_MIN, TT_MAX],
 * and program field (TT_MASK << TT_SHIFT) of register TT_REG, never
 * shrinking an already-programmed value.  Returns -1 on SPD read error. */
1904 static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct mem_param *param, int i,
1906 unsigned SPD_TT, unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX )
1908 unsigned clocks, old_clocks;
1911 value = spd_read_byte(ctrl->channel0[i], SPD_TT); //already in 1/4 ns
1912 if (value < 0) return -1;
1914 clocks = (value + param->divisor -1)/param->divisor;
1915 if (clocks < TT_MIN) {
1919 if (clocks > TT_MAX) {
1923 dtl = pci_read_config32(ctrl->f2, TT_REG);
1925 old_clocks = ((dtl >> TT_SHIFT) & TT_MASK) + TT_BASE;
1926 if (old_clocks >= clocks) { //some one did it?
1927 // clocks = old_clocks;
1930 dtl &= ~(TT_MASK << TT_SHIFT);
1931 dtl |= ((clocks - TT_BASE) << TT_SHIFT);
1932 pci_write_config32(ctrl->f2, TT_REG, dtl);
/* tRCD (RAS-to-CAS delay) for DIMM i, via the generic 1/4-ns helper. */
1937 static int update_dimm_Trcd(const struct mem_controller *ctrl,
1938 const struct mem_param *param, int i)
1940 return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRCD, DTL_TRCD_SHIFT, DTL_TRCD_MASK, DTL_TRCD_BASE, DTL_TRCD_MIN, DTL_TRCD_MAX);
/* tRRD (activate-to-activate, different banks) for DIMM i. */
1944 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1946 return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRRD, DTL_TRRD_SHIFT, DTL_TRRD_MASK, DTL_TRRD_BASE, DTL_TRRD_MIN, DTL_TRRD_MAX);
/* Program tRAS (DRAM Timing Low) for DIMM i.  SPD_TRAS is in whole ns,
 * so it is scaled (<<2) to quarter-ns before the clocks conversion used
 * by the other timing helpers.  Clamps and never shrinks, as above. */
1950 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1952 unsigned clocks, old_clocks;
1955 value = spd_read_byte(ctrl->channel0[i], SPD_TRAS); //in 1 ns
1956 if (value < 0) return -1;
1957 print_tx("update_dimm_Tras: 0 value=", value);
1959 value <<= 2; //convert it to in 1/4ns
1962 print_tx("update_dimm_Tras: 1 value=", value);
1964 clocks = (value + param->divisor - 1)/param->divisor;
1965 print_tx("update_dimm_Tras: divisor=", param->divisor);
1966 print_tx("update_dimm_Tras: clocks=", clocks);
1967 if (clocks < DTL_TRAS_MIN) {
1968 clocks = DTL_TRAS_MIN;
1971 if (clocks > DTL_TRAS_MAX) {
1975 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1976 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1977 if (old_clocks >= clocks) { // someone did it?
1981 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1982 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1983 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* tRP (row precharge time) for DIMM i. */
1987 static int update_dimm_Trp(const struct mem_controller *ctrl,
1988 const struct mem_param *param, int i)
1990 return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRP, DTL_TRP_SHIFT, DTL_TRP_MASK, DTL_TRP_BASE, DTL_TRP_MIN, DTL_TRP_MAX);
/* tRTP (read-to-precharge) for DIMM i.  The register base/min/max shift
 * by `offset` depending on burst length: 64-byte bursts (or 128-bit
 * width, which forces 64-byte mode) use the larger encoding.
 * NOTE(review): the initialization of `offset` for the 64-byte case is
 * in elided lines. */
1994 static int update_dimm_Trtp(const struct mem_controller *ctrl,
1995 const struct mem_param *param, int i, struct mem_info *meminfo)
1997 /* need to figure if it is 32 byte burst or 64 bytes burst */
1999 if (!meminfo->is_Width128) {
2001 dword = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2002 if ((dword & DCL_BurstLength32)) offset = 0;
2004 return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRTP, DTL_TRTP_SHIFT, DTL_TRTP_MASK, DTL_TRTP_BASE+offset, DTL_TRTP_MIN+offset, DTL_TRTP_MAX+offset);
/* tWR (write recovery time) for DIMM i. */
2008 static int update_dimm_Twr(const struct mem_controller *ctrl, const struct mem_param *param, int i)
2010 return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TWR, DTL_TWR_SHIFT, DTL_TWR_MASK, DTL_TWR_BASE, DTL_TWR_MIN, DTL_TWR_MAX);
/* Program the refresh rate field (DRAM Timing High) from SPD_TREF.  The
 * raw SPD encoding is translated to the DTH_TREF encoding in elided
 * lines; the register is rewritten only when the value changes. */
2014 static int update_dimm_Tref(const struct mem_controller *ctrl,
2015 const struct mem_param *param, int i)
2017 uint32_t dth, dth_old;
2019 value = spd_read_byte(ctrl->channel0[i], SPD_TREF); // 0: 15.625us, 1: 3.9us 2: 7.8 us....
2020 if (value < 0) return -1;
2028 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
2031 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
2032 dth |= (value << DTH_TREF_SHIFT);
2033 if (dth_old != dth) {
2034 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Set DCH_FourRankRDimm (DRAM Config High) when quad-rank registered
 * DIMMs are present.  No-op unless QRANK_DIMM_SUPPORT is enabled and the
 * DIMMs are registered.
 * BUGFIX 1: the guard misspelled QRANK_DIMM_SUPPORT as QRANK_DIMM_SUPPRT
 * (undefined, evaluates to 0), so this body was always compiled out —
 * compare L"#ifndef QRANK_DIMM_SUPPORT" at the top of the file and the
 * correctly-spelled guards elsewhere.
 * BUGFIX 2: `meminfo->sz.rank` must be `meminfo->sz[i].rank`; sz is a
 * per-DIMM array (see disable_dimm / get_extra_dimm_mask).  The compile
 * error was hidden by the misspelled #if. */
2040 static void set_4RankRDimm(const struct mem_controller *ctrl,
2041 const struct mem_param *param, struct mem_info *meminfo)
2043 #if QRANK_DIMM_SUPPORT == 1
2048 if (!(meminfo->is_registered)) return;
2052 for (i = 0; i < DIMM_SOCKETS; i++) {
2053 if (!(dimm_mask & (1 << i))) {
2057 if (meminfo->sz[i].rank == 4) {
2065 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2066 dch |= DCH_FourRankRDimm;
2067 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Derive auxiliary per-DIMM masks from SPD and the stored size info and
 * stash them in meminfo: x4/x16 device-width masks (with extra bits for
 * the upper ranks of quad-rank DIMMs), single-rank mask, and 1K-page
 * (10 column bits) mask.  These feed set_dimm_x4, set_RDqsEn, set_Tfaw
 * and set_misc_timing. */
2073 static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl,
2074 struct mem_info *meminfo)
2080 uint32_t mask_single_rank;
2081 uint32_t mask_page_1k;
2083 #if QRANK_DIMM_SUPPORT == 1
2087 long dimm_mask = meminfo->dimm_mask;
2092 mask_single_rank = 0;
2095 for (i = 0; i < DIMM_SOCKETS; i++) {
2096 if (!(dimm_mask & (1 << i))) {
2100 if (meminfo->sz[i].rank == 1) {
2101 mask_single_rank |= 1<<i;
2104 if (meminfo->sz[i].col==10) {
2105 mask_page_1k |= 1<<i;
2109 value = spd_read_byte(ctrl->channel0[i], SPD_PRI_WIDTH);
2111 #if QRANK_DIMM_SUPPORT == 1
2112 rank = meminfo->sz[i].rank;
2117 #if QRANK_DIMM_SUPPORT == 1
/* quad-rank: upper ranks occupy the shifted mask bits */
2119 mask_x4 |= 1<<(i+2);
2122 } else if (value==16) {
2124 #if QRANK_DIMM_SUPPORT == 1
2126 mask_x16 |= 1<<(i+2);
2133 meminfo->x4_mask= mask_x4;
2134 meminfo->x16_mask = mask_x16;
2136 meminfo->single_rank_mask = mask_single_rank;
2137 meminfo->page_1k_mask = mask_page_1k;
/* Copy the low 4 bits of the x4-device mask into the DCL_X4Dimm field of
 * DRAM Config Low. */
2144 static void set_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2147 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2148 dcl &= ~(DCL_X4Dimm_MASK<<DCL_X4Dimm_SHIFT);
2149 dcl |= ((meminfo->x4_mask) & 0xf) << (DCL_X4Dimm_SHIFT);
2150 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count set bits in the low DIMM_SOCKETS bits of dimm_mask.
 * (Accumulator declaration and return are in elided lines.) */
2154 static int count_ones(uint32_t dimm_mask)
2159 for (index = 0; index < DIMM_SOCKETS; index++, dimm_mask>>=1) {
2160 if (dimm_mask & 1) {
/* Choose the on-die termination strength (DCL_DramTerm) based on speed
 * (divisor 100 == DDR2-800) and channel population; the SO-DIMM build
 * (DIMM_SUPPORT == 0x0204) forces 150 ohms.  The default `odt` value is
 * set in elided lines. */
2168 static void set_DramTerm(const struct mem_controller *ctrl,
2169 const struct mem_param *param, struct mem_info *meminfo)
2175 if (param->divisor == 100) { //DDR2 800
2176 if (meminfo->is_Width128) {
2177 if (count_ones(meminfo->dimm_mask & 0x0f)==2) {
2185 #if DIMM_SUPPORT == 0x0204
2186 odt = 0x2; /* 150 ohms */
2189 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2190 dcl &= ~(DCL_DramTerm_MASK<<DCL_DramTerm_SHIFT);
2191 dcl |= (odt & DCL_DramTerm_MASK) << (DCL_DramTerm_SHIFT);
2192 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Enable DRAM ECC (DCL_DimmEccEn) when the northbridge supports it and
 * the CMOS option allows it, then verify per-DIMM: any DIMM whose SPD
 * config byte lacks the ECC bit forces ECC back off for the node.
 * Result recorded in meminfo->is_ecc. */
2196 static void set_ecc(const struct mem_controller *ctrl,
2197 const struct mem_param *param, long dimm_mask, struct mem_info *meminfo)
2202 uint32_t dcl, nbcap;
2203 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
2204 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2205 dcl &= ~DCL_DimmEccEn;
2206 if (nbcap & NBCAP_ECC) {
2207 dcl |= DCL_DimmEccEn;
2209 if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
2210 dcl &= ~DCL_DimmEccEn;
2212 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2214 meminfo->is_ecc = 1;
2215 if (!(dcl & DCL_DimmEccEn)) {
2216 meminfo->is_ecc = 0;
2217 return; // already disabled the ECC, so don't need to read SPD any more
2220 for (i = 0; i < DIMM_SOCKETS; i++) {
2222 if (!(dimm_mask & (1 << i))) {
2226 value = spd_read_byte(ctrl->channel0[i], SPD_DIMM_CONF_TYPE);
2228 if (!(value & SPD_DIMM_CONF_TYPE_ECC)) {
2229 dcl &= ~DCL_DimmEccEn;
2230 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2231 meminfo->is_ecc = 0;
/* tWTR (internal write-to-read delay) for DIMM i — DRAM Timing High. */
2239 static int update_dimm_Twtr(const struct mem_controller *ctrl,
2240 const struct mem_param *param, int i)
2243 return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_HIGH, SPD_TWTR, DTH_TWTR_SHIFT, DTH_TWTR_MASK, DTH_TWTR_BASE, DTH_TWTR_MIN, DTH_TWTR_MAX);
/* Generic setter: write `val` (biased by TT_BASE) into field
 * (TT_MASK << TT_SHIFT) of register TT_REG, dying if val is outside
 * [TT_MIN, TT_MAX].  `str` names the field in the death message
 * (printed in an elided line). */
2247 static void set_TT(const struct mem_controller *ctrl,
2248 const struct mem_param *param, unsigned TT_REG, unsigned TT_SHIFT,
2249 unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX,
2250 unsigned val, const char *str)
2254 if ((val < TT_MIN) || (val > TT_MAX)) {
2256 die(" Unknown\r\n");
2259 reg = pci_read_config32(ctrl->f2, TT_REG);
2260 reg &= ~(TT_MASK << TT_SHIFT);
2261 reg |= ((val - TT_BASE) << TT_SHIFT);
2262 pci_write_config32(ctrl->f2, TT_REG, reg);
/* TrwtTO (read-to-write turnaround) from the speed table, via set_TT. */
2267 static void set_TrwtTO(const struct mem_controller *ctrl,
2268 const struct mem_param *param)
2270 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRWTTO_SHIFT, DTH_TRWTTO_MASK,DTH_TRWTTO_BASE, DTH_TRWTTO_MIN, DTH_TRWTTO_MAX, param->TrwtTO, "TrwtTO");
/* Twrrd (write-to-read turnaround) from the speed table. */
2274 static void set_Twrrd(const struct mem_controller *ctrl, const struct mem_param *param)
2276 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRRD_SHIFT, DTH_TWRRD_MASK,DTH_TWRRD_BASE, DTH_TWRRD_MIN, DTH_TWRRD_MAX, param->Twrrd, "Twrrd");
/* Twrwr (write-to-write timing) from the speed table. */
2280 static void set_Twrwr(const struct mem_controller *ctrl, const struct mem_param *param)
2282 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRWR_SHIFT, DTH_TWRWR_MASK,DTH_TWRWR_BASE, DTH_TWRWR_MIN, DTH_TWRWR_MAX, param->Twrwr, "Twrwr");
/* Trdrd (read-to-read timing) from the speed table. */
2286 static void set_Trdrd(const struct mem_controller *ctrl, const struct mem_param *param)
2288 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRDRD_SHIFT, DTH_TRDRD_MASK,DTH_TRDRD_BASE, DTH_TRDRD_MIN, DTH_TRDRD_MAX, param->Trdrd, "Trdrd");
/* DcqBypassMax (DRAM Config High) from the speed table. */
2292 static void set_DcqBypassMax(const struct mem_controller *ctrl, const struct mem_param *param)
2294 set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_DcqBypassMax_SHIFT, DCH_DcqBypassMax_MASK,DCH_DcqBypassMax_BASE, DCH_DcqBypassMax_MIN, DCH_DcqBypassMax_MAX, param->DcqByPassMax, "DcqBypassMax"); // value need to be in CMOS
/* Tfaw (four-activate window, DRAM Config High): pick the clocks value
 * by memclk frequency, from the 1K-page table if any DIMM has 1K pages,
 * else from the 2K-page table. */
2298 static void set_Tfaw(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2300 static const uint8_t faw_1k[] = {8, 10, 13, 14};
2301 static const uint8_t faw_2k[] = {10, 14, 17, 18};
2302 unsigned memclkfreq_index;
2306 memclkfreq_index = param->dch_memclk;
2308 if (meminfo->page_1k_mask != 0) { //1k page
2309 faw = faw_1k[memclkfreq_index];
2311 faw = faw_2k[memclkfreq_index];
2314 set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_FourActWindow_SHIFT, DCH_FourActWindow_MASK, DCH_FourActWindow_BASE, DCH_FourActWindow_MIN, DCH_FourActWindow_MAX, faw, "FourActWindow");
/* Program MaxAsyncLat (DRAM Config High).  async_lat is computed in
 * elided lines; the FIXME notes it should really come from the trained
 * DqsRcvEnDelay values (index regs 0x10..0x39) plus 6ns. */
2319 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2325 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2326 dch &= ~(DCH_MaxAsyncLat_MASK << DCH_MaxAsyncLat_SHIFT);
2328 //FIXME: We need to use Max of DqsRcvEnDelay + 6ns here: After training get that from index reg 0x10, 0x13, 0x16, 0x19, 0x30, 0x33, 0x36, 0x39
2332 dch |= ((async_lat - DCH_MaxAsyncLat_BASE) << DCH_MaxAsyncLat_SHIFT);
2333 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Set the 2T (slow access) command mode bit in DRAM Config High.
 * (The bit OR is in an elided line.) */
2337 static void set_SlowAccessMode(const struct mem_controller *ctrl)
2341 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2345 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2350 DRAM_OUTPUT_DRV_COMP_CTRL 0, 0x20
2351 DRAM_ADDR_TIMING_CTRL 04, 0x24
/* Program the Output Driver Compensation Control (index 0/0x20) and
 * Address Timing Control (index 4/0x24) registers through the F2 0x98
 * index port.  dword (ODC) and dwordx (address timing) are chosen from
 * build-time DIMM_SUPPORT flavor (registered / SO-DIMM / unbuffered),
 * the selected memclk, and the population/width/rank masks; unbuffered
 * builds may also request 2T command mode (SlowAccessMode).
 * NOTE(review): chunk is line-sampled — `dword` initialization, several
 * case bodies and SlowAccessMode assignments are in elided lines. */
2353 static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *meminfo)
2357 unsigned SlowAccessMode = 0;
2359 long dimm_mask = meminfo->dimm_mask & 0x0f;
2361 #if DIMM_SUPPORT==0x0104 /* DDR2 and REG */
2364 dwordx = 0x002f0000;
2365 switch (meminfo->memclk_set) {
2366 case DCH_MemClkFreq_266MHz:
2367 if ( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
2368 dwordx = 0x002f2700;
2371 case DCH_MemClkFreq_333MHz:
2372 if ( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
2373 if ((meminfo->single_rank_mask & 0x03)!=0x03) { //any double rank there?
2374 dwordx = 0x002f2f00;
2378 case DCH_MemClkFreq_400MHz:
2379 dwordx = 0x002f3300;
2385 #if DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */
2387 dwordx = 0x002F2F00;
2389 switch (meminfo->memclk_set) {
2390 case DCH_MemClkFreq_200MHz: /* nothing to be set here */
2392 case DCH_MemClkFreq_266MHz:
2393 if ((meminfo->single_rank_mask == 0)
2394 && (meminfo->x4_mask == 0) && (meminfo->x16_mask))
2395 dwordx = 0x002C2C00; /* Double rank x8 */
2396 /* else SRx16, SRx8, DRx16 == 0x002F2F00 */
2398 case DCH_MemClkFreq_333MHz:
2399 if ((meminfo->single_rank_mask == 1)
2400 && (meminfo->x16_mask == 1)) /* SR x16 */
2401 dwordx = 0x00272700;
2402 else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)
2403 && (meminfo->single_rank_mask == 0)) { /* DR x8 */
2405 dwordx = 0x00002800;
2406 } else { /* SR x8, DR x16 */
2407 dwordx = 0x002A2A00;
2410 case DCH_MemClkFreq_400MHz:
2411 if ((meminfo->single_rank_mask == 1)
2412 && (meminfo->x16_mask == 1)) /* SR x16 */
2413 dwordx = 0x00292900;
2414 else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)
2415 && (meminfo->single_rank_mask == 0)) { /* DR x8 */
2417 dwordx = 0x00002A00;
2418 } else { /* SR x8, DR x16 */
2419 dwordx = 0x002A2A00;
2425 #if DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */
2426 /* for UNBUF DIMM */
2428 dwordx = 0x002f2f00;
2429 switch (meminfo->memclk_set) {
2430 case DCH_MemClkFreq_200MHz:
2431 if (dimm_mask == 0x03) {
2436 case DCH_MemClkFreq_266MHz:
2437 if (dimm_mask == 0x03) {
2440 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
2441 switch (meminfo->single_rank_mask) {
2443 dwordx = 0x00002f00; //x8 single Rank
2446 dwordx = 0x00342f00; //x8 double Rank
2449 dwordx = 0x00372f00; //x8 single Rank and double Rank mixed
2451 } else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
2452 dwordx = 0x00382f00; //x8 Double Rank and x16 single Rank mixed
2453 } else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
2454 dwordx = 0x00382f00; //x16 single Rank and x8 double Rank mixed
2458 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x00) && ((meminfo->single_rank_mask == 0x01)||(meminfo->single_rank_mask == 0x02))) { //x8 single rank
2459 dwordx = 0x002f2f00;
2461 dwordx = 0x002b2f00;
2465 case DCH_MemClkFreq_333MHz:
2466 dwordx = 0x00202220;
2467 if (dimm_mask == 0x03) {
2470 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
2471 switch (meminfo->single_rank_mask) {
2473 dwordx = 0x00302220; //x8 single Rank
2476 dwordx = 0x002b2220; //x8 double Rank
2479 dwordx = 0x002a2220; //x8 single Rank and double Rank mixed
2481 } else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
2482 dwordx = 0x002c2220; //x8 Double Rank and x16 single Rank mixed
2483 } else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
2484 dwordx = 0x002c2220; //x16 single Rank and x8 double Rank mixed
2488 case DCH_MemClkFreq_400MHz:
2489 dwordx = 0x00202520;
2491 if (dimm_mask == 0x03) {
2499 print_raminit("\tdimm_mask = ", meminfo->dimm_mask);
2500 print_raminit("\tx4_mask = ", meminfo->x4_mask);
2501 print_raminit("\tx16_mask = ", meminfo->x16_mask);
2502 print_raminit("\tsingle_rank_mask = ", meminfo->single_rank_mask);
2503 print_raminit("\tODC = ", dword);
2504 print_raminit("\tAddr Timing= ", dwordx);
2507 #if (DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2508 if (SlowAccessMode) {
2509 set_SlowAccessMode(ctrl);
2513 /* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
2514 pci_write_config32_index_wait(ctrl->f2, 0x98, 0, dword);
2515 if (meminfo->is_Width128) {
2516 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x20, dword);
2519 /* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
2520 pci_write_config32_index_wait(ctrl->f2, 0x98, 4, dwordx);
2521 if (meminfo->is_Width128) {
2522 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x24, dwordx);
/* Enable RDqsEn (DRAM Config High) on socket F (CPU_SOCKET_TYPE 0x10)
 * when all devices are x8 (neither x4 nor x16 present) — registered-DIMM
 * read-DQS handling.  The bit OR is in an elided line. */
2528 static void set_RDqsEn(const struct mem_controller *ctrl,
2529 const struct mem_param *param, struct mem_info *meminfo)
2531 #if CPU_SOCKET_TYPE==0x10
2532 //only need to set for reg and x8
2535 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2538 if ((!meminfo->x4_mask) && (!meminfo->x16_mask)) {
2542 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Hardcode the dynamic-idle-cycle limit (DRAM Ctrl Misc) to 16 clocks,
 * per AMD recommendation. */
2547 static void set_idle_cycle_limit(const struct mem_controller *ctrl,
2548 const struct mem_param *param)
2551 /* AMD says to Hardcode this */
2552 dcm = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
2553 dcm &= ~(DCM_ILD_lmt_MASK << DCM_ILD_lmt_SHIFT);
2554 dcm |= DCM_ILD_lmt_16 << DCM_ILD_lmt_SHIFT;
2556 pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
/* Set RdWrQByp (DRAM Ctrl Misc) to 2 — range-checked [0,3] via set_TT. */
2560 static void set_RdWrQByp(const struct mem_controller *ctrl,
2561 const struct mem_param *param)
2563 set_TT(ctrl, param, DRAM_CTRL_MISC, DCM_RdWrQByp_SHIFT, DCM_RdWrQByp_MASK,0, 0, 3, 2, "RdWrQByp");
/* Program all per-DIMM timing fields from SPD for every enabled DIMM,
 * disabling any DIMM whose SPD reads fail (dimm_err path), then program
 * the node-wide timing/config registers.  Stores the final mask in
 * meminfo->dimm_mask and returns it. */
2567 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask, struct mem_info *meminfo)
2571 for (i = 0; i < DIMM_SOCKETS; i++) {
2573 if (!(dimm_mask & (1 << i))) {
2576 print_tx("dimm socket: ", i);
2577 /* DRAM Timing Low Register */
2578 print_t("\ttrc\r\n");
2579 if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2581 print_t("\ttrcd\r\n");
2582 if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2584 print_t("\ttrrd\r\n");
2585 if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2587 print_t("\ttras\r\n");
2588 if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2590 print_t("\ttrp\r\n");
2591 if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2593 print_t("\ttrtp\r\n");
2594 if ((rc = update_dimm_Trtp(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
2596 print_t("\ttwr\r\n");
2597 if ((rc = update_dimm_Twr (ctrl, param, i)) <= 0) goto dimm_err;
2599 /* DRAM Timing High Register */
2600 print_t("\ttref\r\n");
2601 if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2603 print_t("\ttwtr\r\n");
2604 if ((rc = update_dimm_Twtr(ctrl, param, i)) <= 0) goto dimm_err;
2606 print_t("\ttrfc\r\n");
2607 if ((rc = update_dimm_Trfc(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
2609 /* DRAM Config Low */
/* dimm_err: SPD read failed for this DIMM — drop it and continue */
2616 dimm_mask = disable_dimm(ctrl, i, dimm_mask, meminfo);
2619 meminfo->dimm_mask = dimm_mask; // store final dimm_mask
2621 get_extra_dimm_mask(ctrl, meminfo); // will be used by RDqsEn and dimm_x4
2622 /* DRAM Timing Low Register */
2624 /* DRAM Timing High Register */
2625 set_TrwtTO(ctrl, param);
2626 set_Twrrd (ctrl, param);
2627 set_Twrwr (ctrl, param);
2628 set_Trdrd (ctrl, param);
2630 set_4RankRDimm(ctrl, param, meminfo);
2632 /* DRAM Config High */
2633 set_Tfaw(ctrl, param, meminfo);
2634 set_DcqBypassMax(ctrl, param);
2635 set_max_async_latency(ctrl, param);
2636 set_RDqsEn(ctrl, param, meminfo);
2638 /* DRAM Config Low */
2639 set_ecc(ctrl, param, dimm_mask, meminfo);
2640 set_dimm_x4(ctrl, param, meminfo);
2641 set_DramTerm(ctrl, param, meminfo);
2643 /* DRAM Control Misc */
2644 set_idle_cycle_limit(ctrl, param);
2645 set_RdWrQByp(ctrl, param);
/* Top-level SPD-driven setup for one memory controller (node).
 *
 * Flow: bail out if the controller is absent; activate the SPD ROM and
 * detect DIMMs; successively narrow dimm_mask through
 * spd_enable_2channels(), spd_set_ram_size() and
 * spd_handle_unbuffered_dimms(); pick the memory clock via
 * spd_set_memclk(); then copy the chosen param set into a local copy
 * (paramx), refine its divisor, and program all timing registers via
 * spd_set_dram_timing() before ordering the DIMMs.
 *
 * NOTE(review): the error/early-return paths between the visible lines
 * (e.g. after "No memory for this cpu" and before the "SPD error"
 * message) are elided in this extract -- confirm against the full file.
 * NOTE(review): "¶mx" below appears to be a mis-encoding of
 * "&paramx" introduced by extraction; the code bytes are left untouched
 * here -- verify against the original source.
 */
2650 static void sdram_set_spd_registers(const struct mem_controller *ctrl,
2651 struct sys_info *sysinfo)
2653 struct spd_set_memclk_result result;
2654 const struct mem_param *param;
2655 struct mem_param paramx;
2656 struct mem_info *meminfo;
2659 if (!sysinfo->ctrl_present[ctrl->node_id]) {
2663 meminfo = &sysinfo->meminfo[ctrl->node_id];
2665 print_debug_addr("sdram_set_spd_registers: paramx :", ¶mx);
2667 activate_spd_rom(ctrl);
2668 dimm_mask = spd_detect_dimms(ctrl);
2669 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2670 print_debug("No memory for this cpu\r\n");
2673 dimm_mask = spd_enable_2channels(ctrl, dimm_mask, meminfo);
2676 dimm_mask = spd_set_ram_size(ctrl , dimm_mask, meminfo);
2679 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask, meminfo);
2682 result = spd_set_memclk(ctrl, dimm_mask, meminfo);
2683 param = result.param;
2684 dimm_mask = result.dimm_mask;
2688 //store memclk set to sysinfo, in case we need to rebuild param again
2689 meminfo->memclk_set = param->dch_memclk;
2691 memcpy(¶mx, param, sizeof(paramx));
2693 paramx.divisor = get_exact_divisor(param->dch_memclk, paramx.divisor);
2695 dimm_mask = spd_set_dram_timing(ctrl, ¶mx , dimm_mask, meminfo); // dimm_mask will be stored to meminfo->dimm_mask
2699 order_dimms(ctrl, meminfo);
2703 /* Unrecoverable error reading SPD data */
2704 print_err("SPD error - reset\r\n");
2709 #define TIMEOUT_LOOPS 300000
2711 #include "raminit_f_dqs.c"
2713 #if HW_MEM_HOLE_SIZEK != 0
/* Move ("hoist") the DRAM that would overlap the hardware memory hole
 * up above 4GB by rewriting the per-node DRAM Base/Limit registers
 * (F1 regs 0x40/0x44, stride 8 per node) and programming the DRAM Hole
 * Address Register (F1 reg 0xf0) on node i.
 *
 * carry_over is the amount of memory (in KB, since hole_startk is in
 * KB and 4*1024*1024 K == 4GB) between the hole start and 4GB that must
 * be relocated.  Base/Limit registers hold address bits [39:24] in
 * [31:16], so a KB quantity is added as (carry_over << 2).
 *
 * Pass 1: every node above node i has both its base and limit shifted
 * up by carry_over (entries with the RE/WE bits [1:0] not both set are
 * skipped).  Pass 2: node i itself only grows its limit.  Special case:
 * if node i's base coincides exactly with hole_startk, the hole offset
 * would be 0 (overflow), so instead the node's base is moved to 4GB.
 * Finally the hoist register combines the hole start address, the
 * hole-to-DRAM offset and (presumably) the DramHoleValid bit.
 *
 * NOTE(review): the declarations of dev, hoist, basek, ii and j, the
 * return statement and several closing braces are elided in this
 * extract; "dev" is presumably ctrl[i].f1 -- confirm in the full file.
 */
2714 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2717 uint32_t carry_over;
2719 uint32_t base, limit;
2724 carry_over = (4*1024*1024) - hole_startk;
2726 for (ii=controllers - 1;ii>i;ii--) {
2727 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2728 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2731 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
2732 limit += (carry_over << 2 );
2733 base += (carry_over << 2 );
/* The base/limit map is replicated in F1 of every node; write all copies. */
2734 for (j = 0; j < controllers; j++) {
2735 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit);
2736 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base );
2739 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2740 limit += (carry_over << 2);
2741 for (j = 0; j < controllers; j++) {
2742 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit);
2745 base = pci_read_config32(dev, 0x40 + (i << 3));
2746 basek = (base & 0xffff0000) >> 2;
2747 if (basek == hole_startk) {
2748 //don't need set memhole here, because hole offset would be 0 (overflow),
2749 //so change the base reg instead; new basek will be 4*1024*1024
2751 base |= (4*1024*1024)<<2;
2752 for (j = 0; j < controllers; j++) {
2753 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
2756 hoist = /* hole start address */
2757 ((hole_startk << 10) & 0xff000000) +
2758 /* hole address to memory controller address */
2759 (((basek + carry_over) >> 6) & 0x0000ff00) +
2762 pci_write_config32(dev, 0xf0, hoist);
/* Set up the hardware memory hole below 4GB.
 *
 * hole_startk (in KB) defaults to 4GB - HW_MEM_HOLE_SIZEK.  With
 * HW_MEM_HOLE_SIZE_AUTO_INC, the start is nudged downward if it would
 * land exactly on a node's DRAM base (that case cannot be expressed by
 * hoist_memory's hole-offset computation), placing it in the middle of
 * the previous node instead.  Then the node whose [base, limit) range
 * spans hole_startk is located, hoist_memory() relocates the overlapped
 * memory above 4GB, and TOP_MEM is reprogrammed via set_top_mem().
 * Only one hole is ever created (both loops break on first match).
 *
 * NOTE(review): declarations of i, basek_pri and end_k, plus the loop
 * bodies' closing braces and the #endif lines, are elided in this
 * extract -- confirm against the full file.
 */
2768 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2771 uint32_t hole_startk;
2774 hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
2776 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
2777 /* We need to double check if the hole_startk is valid: if it is equal
2778 to a node's basek, we need to decrease it some */
2780 for (i=0; i<controllers; i++) {
/* Skip map entries whose RE/WE bits [1:0] are not both set (node has no DRAM). */
2783 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2784 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2787 base_k = (base & 0xffff0000) >> 2;
2788 if (base_k == hole_startk) {
2789 /* decrease mem hole startk to make sure it is
2790 on middle of previous node */
2791 hole_startk -= (base_k - basek_pri) >> 1;
2792 break; //only one hole
2797 /* find node index that need do set hole */
2798 for (i=0; i < controllers; i++) {
2799 uint32_t base, limit;
2800 unsigned base_k, limit_k;
2801 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2802 if ((base & ((1 << 1) | (1 << 0))) != ((1 << 1) | (1 << 0))) {
2805 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2806 base_k = (base & 0xffff0000) >> 2;
2807 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2808 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2810 hoist_memory(controllers, ctrl, hole_startk, i);
2811 end_k = memory_end_k(ctrl, controllers);
2812 set_top_mem(end_k, hole_startk);
2813 break; //only one hole
/* Enable and initialize DRAM on all controllers, then kick off DQS
 * training.  Phases (visible in this extract):
 *   1. die() if no memory at all.
 *   2. Per controller: start the memory clocks (DCH_MemClkFreqVal), or
 *      disable the DRAM interface entirely if the node has no DIMMs;
 *      program address timing / output driver compensation.
 *   3. memreset(), plus a short delay loop, before asserting InitDram.
 *   4. Per controller: enable ECC (and ChipKill on 128-bit width) in
 *      MCA_NB_CONFIG; apply the rev F0/F1 DqsRcvEnTrain workaround;
 *      set DCL_InitDram to start hardware DRAM init.
 *   5. Per controller: poll DCL_InitDram clear (bounded by
 *      TIMEOUT_LOOPS), then poll DCM_MemClrStatus before touching
 *      memory; under the F0/F1 workaround, record elapsed TSC deltas
 *      in tsc0[] for later use by dqs_timing().
 *   6. Set up the hardware memory hole (only legal after MemClrStatus).
 *   7. Cache TOP_MEM/TOP_MEM2 (converted from bytes to KB) in sysinfo.
 *   8. Mark nodes with memory as needing training (0x80) and run
 *      dqs_timing() per the MEM_TRAIN_SEQ strategy (0: all at once;
 *      1: first node only; 2: per node with MTRRs temporarily set up).
 *
 * NOTE(review): this extract is lossy -- local declarations (i, dch,
 * dcl, dword, mnc, loops, msr, tsc/tsc0), `continue` statements,
 * `#else`/`#endif` lines and closing braces are elided; the duplicated
 * DqsRcvEnTrain sequence below is presumably the #else arm of the
 * F0/F1 workaround conditional.  Confirm against the full file.
 */
2821 static void sdram_enable(int controllers, const struct mem_controller *ctrl,
2822 struct sys_info *sysinfo)
2826 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
2827 unsigned cpu_f0_f1[8];
2828 /* FIXME: How about 32 node machine later? */
2831 print_debug_addr("sdram_enable: tsc0[8]: ", &tsc0[0]);
2835 /* Error if I don't have memory */
2836 if (memory_end_k(ctrl, controllers) == 0) {
2837 die("No memory\r\n");
2840 /* Before enabling memory start the memory clocks */
2841 for (i = 0; i < controllers; i++) {
2843 if (!sysinfo->ctrl_present[ i ])
2845 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2847 /* if no memory installed, disable the interface */
2848 if (sysinfo->meminfo[i].dimm_mask==0x00){
2849 dch |= DCH_DisDramInterface;
2850 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2853 dch |= DCH_MemClkFreqVal;
2854 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2855 /* address timing and Output driver comp Control */
2856 set_misc_timing(ctrl+i, sysinfo->meminfo+i );
2860 /* We need to wait a minimum of 20 MEMCLKs before enabling InitDram */
2861 memreset(controllers, ctrl);
2863 print_debug("prepare to InitDram:");
2864 for (i=0; i<10; i++) {
2865 print_debug_hex32(i);
2866 print_debug("\b\b\b\b\b\b\b\b");
2868 print_debug("\r\n");
2871 for (i = 0; i < controllers; i++) {
2873 if (!sysinfo->ctrl_present[ i ])
2875 /* Skip everything if I don't have any memory on this controller */
2876 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2877 if (!(dch & DCH_MemClkFreqVal)) {
2882 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2883 if (dcl & DCL_DimmEccEn) {
2885 print_spew("ECC enabled\r\n");
2886 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
/* ChipKill requires the 128-bit (ganged) DRAM interface. */
2888 if (dcl & DCL_Width128) {
2889 mnc |= MNC_CHIPKILL_EN;
2891 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2894 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
2895 cpu_f0_f1[i] = is_cpu_pre_f2_in_bsp(i);
2897 //Rev F0/F1 workaround
2899 /* Set the DqsRcvEnTrain bit */
2900 dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
2901 dword |= DC_DqsRcvEnTrain;
2902 pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
2909 /* Set the DqsRcvEnTrain bit */
2910 dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
2911 dword |= DC_DqsRcvEnTrain;
2912 pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
2915 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2916 dcl |= DCL_InitDram;
2917 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2920 for (i = 0; i < controllers; i++) {
2921 uint32_t dcl, dch, dcm;
2922 if (!sysinfo->ctrl_present[ i ])
2924 /* Skip everything if I don't have any memory on this controller */
2925 if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
2927 print_debug("Initializing memory: ");
/* Poll for hardware DRAM init completion (InitDram self-clears),
 * bounded by TIMEOUT_LOOPS iterations. */
2930 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2932 if ((loops & 1023) == 0) {
2935 } while(((dcl & DCL_InitDram) != 0) && (loops < TIMEOUT_LOOPS));
2936 if (loops >= TIMEOUT_LOOPS) {
2937 print_debug(" failed\r\n");
2941 /* Wait until it is safe to touch memory */
2943 dcm = pci_read_config32(ctrl[i].f2, DRAM_CTRL_MISC);
2944 } while(((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ );
2946 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
/* Record per-node TSC delta (with borrow handling across .lo/.hi)
 * for the F0/F1 workaround; consumed by dqs_timing() below. */
2950 print_debug_dqs_tsc("\r\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
2951 print_debug_dqs_tsc("end tsc ", i, tsc.hi, tsc.lo, 2);
2953 if (tsc.lo<tsc0[i].lo) {
2956 tsc.lo -= tsc0[i].lo;
2957 tsc.hi -= tsc0[i].hi;
2959 tsc0[i].lo = tsc.lo;
2960 tsc0[i].hi = tsc.hi;
2962 print_debug_dqs_tsc(" dtsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
2965 print_debug(" done\r\n");
2968 #if HW_MEM_HOLE_SIZEK != 0
2969 /* init hw mem hole here */
2970 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2971 set_hw_mem_hole(controllers, ctrl);
2974 /* store tom to sysinfo, and it will be used by dqs_timing */
/* TOP_MEM MSR holds a byte address; convert to KB for sysinfo. */
2978 msr = rdmsr(TOP_MEM);
2979 sysinfo->tom_k = ((msr.hi<<24) | (msr.lo>>8))>>2;
2982 msr = rdmsr(TOP_MEM2);
2983 sysinfo->tom2_k = ((msr.hi<<24)| (msr.lo>>8))>>2;
2986 for (i = 0; i < controllers; i++) {
2987 sysinfo->mem_trained[i] = 0;
2989 if (!sysinfo->ctrl_present[ i ])
2992 /* Skip everything if I don't have any memory on this controller */
2993 if (sysinfo->meminfo[i].dimm_mask==0x00)
2996 sysinfo->mem_trained[i] = 0x80; // mem need to be trained
3000 #if MEM_TRAIN_SEQ == 0
3001 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3002 dqs_timing(controllers, ctrl, tsc0, sysinfo);
3004 dqs_timing(controllers, ctrl, sysinfo);
3008 #if MEM_TRAIN_SEQ == 2
3009 /* need to enable mtrr, so dqs training could access the test address */
3010 setup_mtrr_dqs(sysinfo->tom_k, sysinfo->tom2_k);
3013 for (i = 0; i < controllers; i++) {
3014 /* Skip everything if I don't have any memory on this controller */
3015 if (sysinfo->mem_trained[i]!=0x80)
3018 dqs_timing(i, &ctrl[i], sysinfo, 1);
3020 #if MEM_TRAIN_SEQ == 1
3021 break; // only train the first node with ram
3025 #if MEM_TRAIN_SEQ == 2
3026 clear_mtrr_dqs(sysinfo->tom2_k);
3031 #if MEM_TRAIN_SEQ != 1
3032 wait_all_core0_mem_trained(sysinfo);
3038 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
3039 const uint16_t *spd_addr)
3043 struct mem_controller *ctrl;
3044 for (i=0;i<controllers; i++) {
3047 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
3048 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
3049 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
3050 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
3052 if (spd_addr == (void *)0) continue;
3054 for (j=0;j<DIMM_SOCKETS;j++) {
3055 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
3056 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];