1 /* This should be done by Eric
2 2004.11 yhlu add 4 rank DIMM support
3 2004.12 yhlu add D0 support
4 2005.02 yhlu add E0 memory hole support
5 2005.10 yhlu make it support DDR2 only
8 #include <cpu/x86/mem.h>
9 #include <cpu/x86/cache.h>
10 #include <cpu/x86/mtrr.h>
11 #include <cpu/x86/tsc.h>
17 #ifndef QRANK_DIMM_SUPPORT
18 #define QRANK_DIMM_SUPPORT 0
21 static inline void print_raminit(const char *strval, uint32_t val)
24 printk_debug("%s:%08x\r\n", strval, val);
26 print_debug(strval); print_debug_hex32(val); print_debug("\r\n");
30 #define RAM_TIMING_DEBUG 0
32 static inline void print_tx(const char *strval, uint32_t val)
34 #if RAM_TIMING_DEBUG == 1
35 print_raminit(strval, val);
40 static inline void print_t(const char *strval)
42 #if RAM_TIMING_DEBUG == 1
49 #if (CONFIG_LB_MEM_TOPK & (CONFIG_LB_MEM_TOPK -1)) != 0
50 # error "CONFIG_LB_MEM_TOPK must be a power of 2"
53 #include "amdk8_f_pci.c"
56 // for PCI_ADDR(0, 0x18, 2, 0x98) index, and PCI_ADDR(0x, 0x18, 2, 0x9c) data
59 [29: 0] DctOffset (Dram Controller Offset)
60 [30:30] DctAccessWrite (Dram Controller Read/Write Select)
63 [31:31] DctAccessDone (Dram Controller Access Done)
64 0 = Access in progress
65 1 = No access is in progress
68 [31: 0] DctOffsetData (Dram Controller Offset Data)
71 - Write the register num to DctOffset with DctAccessWrite = 0
72 - poll the DctAccessDone until it = 1
73 - Read the data from DctOffsetData
75 - Write the data to DctOffsetData
76 - Write register num to DctOffset with DctAccessWrite = 1
77 - poll the DctAccessDone until it = 1
83 static void setup_resource_map(const unsigned int *register_values, int max)
87 for(i = 0; i < max; i += 3) {
92 dev = register_values[i] & ~0xff;
93 where = register_values[i] & 0xff;
94 reg = pci_read_config32(dev, where);
95 reg &= register_values[i+1];
96 reg |= register_values[i+2];
97 pci_write_config32(dev, where, reg);
102 static int controller_present(const struct mem_controller *ctrl)
104 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
107 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
109 static const unsigned int register_values[] = {
111 /* Careful set limit registers before base registers which contain the enables */
112 /* DRAM Limit i Registers
121 * [ 2: 0] Destination Node ID
131 * [10: 8] Interleave select
132 * specifies the values of A[14:12] to use with interleave enable.
134 * [31:16] DRAM Limit Address i Bits 39-24
135 * This field defines the upper address bits of a 40 bit address
136 * that define the end of the DRAM region.
138 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
139 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
140 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
141 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
142 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
143 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
144 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
145 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
146 /* DRAM Base i Registers
155 * [ 0: 0] Read Enable
158 * [ 1: 1] Write Enable
159 * 0 = Writes Disabled
162 * [10: 8] Interleave Enable
163 * 000 = No interleave
164 * 001 = Interleave on A[12] (2 nodes)
166 * 011 = Interleave on A[12] and A[14] (4 nodes)
170 * 111 = Interleve on A[12] and A[13] and A[14] (8 nodes)
172 * [13:16] DRAM Base Address i Bits 39-24
173 * This field defines the upper address bits of a 40-bit address
174 * that define the start of the DRAM region.
176 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
177 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
178 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
179 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
180 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
181 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
182 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
183 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
185 /* DRAM CS Base Address i Registers
194 * [ 0: 0] Chip-Select Bank Enable
198 * [ 2: 2] Memory Test Failed
200 * [13: 5] Base Address (21-13)
201 * An optimization used when all DIMM are the same size...
203 * [28:19] Base Address (36-27)
204 * This field defines the top 11 addresses bit of a 40-bit
205 * address that define the memory address space. These
206 * bits decode 32-MByte blocks of memory.
209 PCI_ADDR(0, 0x18, 2, 0x40), 0xe007c018, 0x00000000,
210 PCI_ADDR(0, 0x18, 2, 0x44), 0xe007c018, 0x00000000,
211 PCI_ADDR(0, 0x18, 2, 0x48), 0xe007c018, 0x00000000,
212 PCI_ADDR(0, 0x18, 2, 0x4C), 0xe007c018, 0x00000000,
213 PCI_ADDR(0, 0x18, 2, 0x50), 0xe007c018, 0x00000000,
214 PCI_ADDR(0, 0x18, 2, 0x54), 0xe007c018, 0x00000000,
215 PCI_ADDR(0, 0x18, 2, 0x58), 0xe007c018, 0x00000000,
216 PCI_ADDR(0, 0x18, 2, 0x5C), 0xe007c018, 0x00000000,
217 /* DRAM CS Mask Address i Registers
222 * Select bits to exclude from comparison with the DRAM Base address register.
224 * [13: 5] Address Mask (21-13)
225 * Address to be excluded from the optimized case
227 * [28:19] Address Mask (36-27)
228 * The bits with an address mask of 1 are excluded from address comparison
232 PCI_ADDR(0, 0x18, 2, 0x60), 0xe007c01f, 0x00000000,
233 PCI_ADDR(0, 0x18, 2, 0x64), 0xe007c01f, 0x00000000,
234 PCI_ADDR(0, 0x18, 2, 0x68), 0xe007c01f, 0x00000000,
235 PCI_ADDR(0, 0x18, 2, 0x6C), 0xe007c01f, 0x00000000,
237 /* DRAM Control Register
239 * [ 3: 0] RdPtrInit ( Read Pointer Initial Value)
240 * 0x03-0x00: reserved
241 * [ 6: 4] RdPadRcvFifoDly (Read Delay from Pad Receive FIFO)
244 * 010 = 1.5 Memory Clocks
245 * 011 = 2 Memory Clocks
246 * 100 = 2.5 Memory Clocks
247 * 101 = 3 Memory Clocks
248 * 110 = 3.5 Memory Clocks
251 * [16:16] AltVidC3MemClkTriEn (AltVID Memory Clock Tristate Enable)
252 * Enables the DDR memory clocks to be tristated when alternate VID mode is enabled. This bit has no effect if the DisNbClkRamp bit (F3, 0x88) is set
253 * [17:17] DllTempAdjTime (DLL Temperature Adjust Cycle Time)
256 * [18:18] DqsRcvEnTrain (DQS Receiver Enable Training Mode)
257 * 0 = Normal DQS Receiver enable operation
258 * 1 = DQS receiver enable training mode
261 PCI_ADDR(0, 0x18, 2, 0x78), 0xfff80000, (6<<4)|(6<<0),
263 /* DRAM Initialization Register
265 * [15: 0] MrsAddress (Address for MRS/EMRS Commands)
266 * this field specifies the data driven on the DRAM address pins 15-0 for MRS and EMRS commands
267 * [18:16] MrsBank (Bank Address for MRS/EMRS Commands)
268 * this field specifies the data driven on the DRAM bank pins for the MRS and EMRS commands
270 * [24:24] SendPchgAll (Send Precharge All Command)
271 * Setting this bit causes the DRAM controller to send a precharge all command. This bit is cleared by the hardware after the command completes
272 * [25:25] SendAutoRefresh (Send Auto Refresh Command)
273 * Setting this bit causes the DRAM controller to send an auto refresh command. This bit is cleared by the hardware after the command completes
274 * [26:26] SendMrsCmd (Send MRS/EMRS Command)
275 * Setting this bit causes the DRAM controller to send the MRS or EMRS command defined by the MrsAddress and MrsBank fields. This bit is cleared by the hardware after the command completes
276 * [27:27] DeassertMemRstX (De-assert Memory Reset)
277 * Setting this bit causes the DRAM controller to de-assert the memory reset pin. This bit cannot be used to assert the memory reset pin
278 * [28:28] AssertCke (Assert CKE)
279 * setting this bit causes the DRAM controller to assert the CKE pins. This bit cannot be used to de-assert the CKE pins
281 * [31:31] EnDramInit (Enable DRAM Initialization)
282 * Setting this bit puts the DRAM controller in a BIOS controlled DRAM initialization mode. BIOS must clear this bit after DRAM initialization is complete.
284 // PCI_ADDR(0, 0x18, 2, 0x7C), 0x60f80000, 0,
287 /* DRAM Bank Address Mapping Register
289 * Specify the memory module size
309 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff0000, 0x00000000,
310 /* DRAM Timing Low Register
312 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
322 * [ 5: 4] Trcd (Ras#-active to Cas# read/write delay)
328 * [ 9: 8] Trp (Row Precharge Time, Precharge-to-Active or Auto-Refresh)
334 * [11:11] Trtp (Read to Precharge Time, read Cas# to precharge time)
335 * 0 = 2 clocks for Burst Length of 32 Bytes
336 * 4 clocks for Burst Length of 64 Bytes
337 * 1 = 3 clocks for Burst Length of 32 Bytes
338 * 5 clocks for Burst Length of 64 Bytes
339 * [15:12] Tras (Minimum Ras# Active Time)
342 * 0010 = 5 bus clocks
344 * 1111 = 18 bus clocks
345 * [19:16] Trc (Row Cycle Time, Ras#-active to Ras#-active or auto refresh of the same bank)
346 * 0000 = 11 bus clocks
347 * 0010 = 12 bus clocks
349 * 1110 = 25 bus clocks
350 * 1111 = 26 bus clocks
351 * [21:20] Twr (Write Recovery Time, From the last data to precharge, writes can go back-to-back)
356 * [23:22] Trrd (Active-to-active (Ras#-to-Ras#) Delay of different banks)
361 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel A, BIOS should set it to reduce the power consumption)
362 * Bit F(1207) M2 Package S1g1 Package
364 * 1 N/A MA0_CLK1 MA0_CLK1
367 * 4 MA1_CLK MA1_CLK0 N/A
368 * 5 MA0_CLK MA0_CLK0 MA0_CLK0
370 * 7 N/A MA0_CLK2 MA0_CLK2
372 PCI_ADDR(0, 0x18, 2, 0x88), 0x000004c8, 0xff000002 /* 0x03623125 */ ,
373 /* DRAM Timing High Register
376 * [ 6: 4] TrwtTO (Read-to-Write Turnaround for Data, DQS Contention)
386 * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay, minium write-to-read delay when both access the same chip select)
391 * [11:10] Twrrd (Write to Read DIMM Termination Turnaround, minimum write-to-read delay when accessing two different DIMMs)
396 * [13:12] Twrwr (Write to Write Timing)
397 * 00 = 1 bus clocks ( 0 idle cycle on the bus)
398 * 01 = 2 bus clocks ( 1 idle cycle on the bus)
399 * 10 = 3 bus clocks ( 2 idle cycles on the bus)
401 * [15:14] Trdrd ( Read to Read Timing)
402 * 00 = 2 bus clocks ( 1 idle cycle on the bus)
403 * 01 = 3 bus clocks ( 2 idle cycles on the bus)
404 * 10 = 4 bus clocks ( 3 idle cycles on the bus)
405 * 11 = 5 bus clocks ( 4 idle cycles on the bus)
406 * [17:16] Tref (Refresh Rate)
407 * 00 = Undefined behavior
409 * 10 = Refresh interval of 7.8 microseconds
410 * 11 = Refresh interval of 3.9 microseconds
412 * [22:20] Trfc0 ( Auto-Refresh Row Cycle Time for the Logical DIMM0, based on DRAM density and speed)
413 * 000 = 75 ns (all speeds, 256Mbit)
414 * 001 = 105 ns (all speeds, 512Mbit)
415 * 010 = 127.5 ns (all speeds, 1Gbit)
416 * 011 = 195 ns (all speeds, 2Gbit)
417 * 100 = 327.5 ns (all speeds, 4Gbit)
421 * [25:23] Trfc1 ( Auto-Refresh Row Cycle Time for the Logical DIMM1, based on DRAM density and speed)
422 * [28:26] Trfc2 ( Auto-Refresh Row Cycle Time for the Logical DIMM2, based on DRAM density and speed)
423 * [31:29] Trfc3 ( Auto-Refresh Row Cycle Time for the Logical DIMM3, based on DRAM density and speed)
425 PCI_ADDR(0, 0x18, 2, 0x8c), 0x000c008f, (2 << 16)|(1 << 8),
426 /* DRAM Config Low Register
428 * [ 0: 0] InitDram (Initialize DRAM)
429 * 1 = write 1 causes the DRAM controller to execute the DRAM initialization; when done it reads back as 0
430 * [ 1: 1] ExitSelfRef ( Exit Self Refresh Command )
431 * 1 = write 1 causes the DRAM controller to bring the DRAMs out of self refresh mode
433 * [ 5: 4] DramTerm (DRAM Termination)
434 * 00 = On die termination disabled
439 * [ 7: 7] DramDrvWeak ( DRAM Drivers Weak Mode)
440 * 0 = Normal drive strength mode.
441 * 1 = Weak drive strength mode
442 * [ 8: 8] ParEn (Parity Enable)
443 * 1 = Enable address parity computation output, PAR, and enables the parity error input, ERR
444 * [ 9: 9] SelfRefRateEn (Faster Self Refresh Rate Enable)
445 * 1 = Enable high temperature ( two times normal ) self refresh rate
446 * [10:10] BurstLength32 ( DRAM Burst Length Set for 32 Bytes)
449 * [11:11] Width128 ( Width of DRAM interface)
450 * 0 = the controller DRAM interface is 64-bits wide
451 * 1 = the controller DRAM interface is 128-bits wide
452 * [12:12] X4Dimm (DIMM 0 is x4)
453 * [13:13] X4Dimm (DIMM 1 is x4)
454 * [14:14] X4Dimm (DIMM 2 is x4)
455 * [15:15] X4Dimm (DIMM 3 is x4)
457 * 1 = x4 DIMM present
458 * [16:16] UnBuffDimm ( Unbuffered DIMMs)
460 * 1 = Unbuffered DIMMs
462 * [19:19] DimmEccEn ( DIMM ECC Enable )
463 1 = ECC checking is being enabled for all DIMMs on the DRAM controller ( Through F3 0x44[EccEn])
466 PCI_ADDR(0, 0x18, 2, 0x90), 0xfff6004c, 0x00000010,
467 /* DRAM Config High Register
469 * [ 0: 2] MemClkFreq ( Memory Clock Frequency)
475 * [ 3: 3] MemClkFreqVal (Memory Clock Freqency Valid)
476 * 1 = BIOS need to set the bit when setting up MemClkFreq to the proper value
477 * [ 7: 4] MaxAsyncLat ( Maximum Asynchronous Latency)
482 * [12:12] RDqsEn ( Read DQS Enable) This bit is only be set if x8 registered DIMMs are present in the system
483 * 0 = DM pins function as data mask pins
484 * 1 = DM pins function as read DQS pins
486 * [14:14] DisDramInterface ( Disable the DRAM interface ) When this bit is set, the DRAM controller is disabled, and interface in low power state
487 * 0 = Enabled (default)
489 * [15:15] PowerDownEn ( Power Down Mode Enable )
490 * 0 = Disabled (default)
492 * [16:16] PowerDown ( Power Down Mode )
493 * 0 = Channel CKE Control
494 * 1 = Chip Select CKE Control
495 * [17:17] FourRankSODimm (Four Rank SO-DIMM)
496 * 1 = this bit is set by BIOS to indicate that a four rank SO-DIMM is present
497 * [18:18] FourRankRDimm (Four Rank Registered DIMM)
498 * 1 = this bit is set by BIOS to indicate that a four rank registered DIMM is present
500 * [20:20] SlowAccessMode (Slow Access Mode (2T Mode))
501 * 0 = DRAM address and control signals are driven for one MEMCLK cycle
502 * 1 = One additional MEMCLK of setup time is provided on all DRAM address and control signals except CS, CKE, and ODT; i.e., these signals are driven for two MEMCLK cycles rather than one
504 * [22:22] BankSwizzleMode ( Bank Swizzle Mode),
505 * 0 = Disabled (default)
508 * [27:24] DcqBypassMax ( DRAM Controller Queue Bypass Maximum)
509 * 0000 = No bypass; the oldest request is never bypassed
510 * 0001 = The oldest request may be bypassed no more than 1 time
512 * 1111 = The oldest request may be bypassed no more than 15 times
513 * [31:28] FourActWindow ( Four Bank Activate Window) , not more than 4 banks in a 8 bank device are activated
514 * 0000 = No tFAW window restriction
515 * 0001 = 8 MEMCLK cycles
516 * 0010 = 9 MEMCLK cycles
518 * 1101 = 20 MEMCLK cycles
521 PCI_ADDR(0, 0x18, 2, 0x94), 0x00a82f00,0x00008000,
522 /* DRAM Delay Line Register
524 * [ 0: 0] MemClrStatus (Memory Clear Status) : ---------Readonly
525 * when set, this bit indicates that the memory clear function is complete. Only clear by reset. BIOS should not write or read the DRAM until this bit is set by hardware
526 * [ 1: 1] DisableJitter ( Disable Jitter)
527 * When set the DDR compensation circuit will not change the values unless the change is more than one step from the current value
528 * [ 3: 2] RdWrQByp ( Read/Write Queue Bypass Count)
533 * [ 4: 4] Mode64BitMux (Mismatched DIMM Support Enable)
534 * 1 When bit enables support for mismatched DIMMs when using 128-bit DRAM interface, the Width128 no effect, only for M2 and s1g1
535 * [ 5: 5] DCC_EN ( Dynamic Idle Cycle Counter Enable)
536 * When set to 1, indicates that each entry in the page tables dynamically adjusts the idle cycle limit based on page Conflict/Page Miss (PC/PM) traffic
537 * [ 8: 6] ILD_lmt ( Idle Cycle Limit)
546 * [ 9: 9] DramEnabled ( DRAM Enabled)
547 * When Set, this bit indicates that the DRAM is enabled, this bit is set by hardware after DRAM initialization or on an exit from self refresh. The DRAM controller is initialized after the
548 * hardware-controlled initialization process ( initiated by the F2 0x90[DramInit]) completes or when the BIOS-controlled initialization process completes (F2 0x7c(EnDramInit] is
549 * written from 1 to 0)
551 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel B, BIOS should set it to reduce the power consumption)
552 * Bit F(1207) M2 Package S1g1 Package
554 * 1 N/A MA0_CLK1 MA0_CLK1
557 * 4 MA1_CLK MA1_CLK0 N/A
558 * 5 MA0_CLK MA0_CLK0 MA0_CLK0
560 * 7 N/A MA0_CLK2 MA0_CLK2
562 PCI_ADDR(0, 0x18, 2, 0xa0), 0x00fffc00, 0xff000000,
564 /* DRAM Scrub Control Register
566 * [ 4: 0] DRAM Scrub Rate
568 * [12: 8] L2 Scrub Rate
570 * [20:16] Dcache Scrub
573 * 00000 = Do not scrub
595 * All Others = Reserved
597 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
598 /* DRAM Scrub Address Low Register
600 * [ 0: 0] DRAM Scrubber Redirect Enable
602 * 1 = Scrubber Corrects errors found in normal operation
604 * [31: 6] DRAM Scrub Address 31-6
606 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
607 /* DRAM Scrub Address High Register
609 * [ 7: 0] DRAM Scrub Address 39-32
612 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
614 // for PCI_ADDR(0, 0x18, 2, 0x98) index, and PCI_ADDR(0x, 0x18, 2, 0x9c) data
617 [29: 0] DctOffset (Dram Controller Offset)
618 [30:30] DctAccessWrite (Dram Controller Read/Write Select)
621 [31:31] DctAccessDone (Dram Controller Access Done)
622 0 = Access in progress
623 1 = No access is in progress
626 [31: 0] DctOffsetData (Dram Controller Offset Data)
629 - Write the register num to DctOffset with DctAccessWrite = 0
630 - poll the DctAccessDone until it = 1
631 - Read the data from DctOffsetData
633 - Write the data to DctOffsetData
634 - Write register num to DctOffset with DctAccessWrite = 1
635 - poll the DctAccessDone until it = 1
639 static const unsigned int index_register_values[] = {
640 /* Output Driver Compensation Control Register
642 * [ 1: 0] CkeDrvStren (CKE Drive Strength)
645 * 10 = 1.5x (Default)
648 * [ 5: 4] CsOdtDrvStren (CS/ODT Drive Strength)
651 * 10 = 1.5x (Default)
654 * [ 9: 8] AddrCmdDrvStren (Address/Command Drive Strength)
657 * 10 = 1.5x (Default)
660 * [13:12] ClkDrvStren (MEMCLK Drive Strength)
666 * [17:16] DataDrvStren (Data Drive Strength)
672 * [21:20] DqsDrvStren (DQS Drive Strength)
678 * [29:28] ProcOdt ( Processor On-die Termination)
679 * 00 = 300 ohms +/- 20%
680 * 01 = 150 ohms +/- 20%
681 * 10 = 75 ohms +/- 20%
685 0x00, 0xcfcccccc, 0x00000000,
686 0x20, 0xcfcccccc, 0x00000000,
687 /* Write Data Timing Low Control Register
689 * [ 5: 0] WrDatTimeByte0 (Write Data Byte 0 Timing Control)
691 * 000001 = 1/96 MEMCLK delay
692 * 000010 = 2/96 MEMCLK delay
694 * 101111 = 47/96 MEMCLK delay
697 * [13: 8] WrDatTimeByte1 (Write Data Byte 1 Timing Control)
699 * [21:16] WrDatTimeByte2 (Write Data Byte 2 Timing Control)
701 * [29:24] WrDatTimeByte3 (Write Data Byte 3 Timing Control)
704 0x01, 0xc0c0c0c0, 0x00000000,
705 0x21, 0xc0c0c0c0, 0x00000000,
706 /* Write Data Timing High Control Register
708 * [ 5: 0] WrDatTimeByte4 (Write Data Byte 4 Timing Control)
710 * [13: 8] WrDatTimeByte5 (Write Data Byte 5 Timing Control)
712 * [21:16] WrDatTimeByte6 (Write Data Byte 6 Timing Control)
714 * [29:24] WrDatTimeByte7 (Write Data Byte 7 Timing Control)
717 0x02, 0xc0c0c0c0, 0x00000000,
718 0x22, 0xc0c0c0c0, 0x00000000,
720 /* Write Data ECC Timing Control Register
722 * [ 5: 0] WrChkTime (Write Data ECC Timing Control)
724 * 000001 = 1/96 MEMCLK delay
725 * 000010 = 2/96 MEMCLK delay
727 * 101111 = 47/96 MEMCLK delay
731 0x03, 0x000000c0, 0x00000000,
732 0x23, 0x000000c0, 0x00000000,
734 /* Address Timing Control Register
736 * [ 4: 0] CkeFineDelay (CKE Fine Delay)
738 * 00001 = 1/64 MEMCLK delay
739 * 00010 = 2/64 MEMCLK delay
741 * 11111 = 31/64 MEMCLK delay
742 * [ 5: 5] CkeSetup (CKE Setup Time)
746 * [12: 8] CsOdtFineDelay (CS/ODT Fine Delay)
748 * 00001 = 1/64 MEMCLK delay
749 * 00010 = 2/64 MEMCLK delay
751 * 11111 = 31/64 MEMCLK delay
752 * [13:13] CsOdtSetup (CS/ODT Setup Time)
756 * [20:16] AddrCmdFineDelay (Address/Command Fine Delay)
758 * 00001 = 1/64 MEMCLK delay
759 * 00010 = 2/64 MEMCLK delay
761 * 11111 = 31/64 MEMCLK delay
762 * [21:21] AddrCmdSetup (Address/Command Setup Time)
767 0x04, 0xffc0c0c0, 0x00000000,
768 0x24, 0xffc0c0c0, 0x00000000,
770 /* Read DQS Timing Low Control Register
772 * [ 5: 0] RdDqsTimeByte0 (Read DQS Byte 0 Timing Control)
774 * 000001 = 1/96 MEMCLK delay
775 * 000010 = 2/96 MEMCLK delay
777 * 101111 = 47/96 MEMCLK delay
780 * [13: 8] RdDqsTimeByte1 (Read DQS Byte 1 Timing Control)
782 * [21:16] RdDqsTimeByte2 (Read DQS Byte 2 Timing Control)
784 * [29:24] RdDqsTimeByte3 (Read DQS Byte 3 Timing Control)
787 0x05, 0xc0c0c0c0, 0x00000000,
788 0x25, 0xc0c0c0c0, 0x00000000,
790 /* Read DQS Timing High Control Register
792 * [ 5: 0] RdDqsTimeByte4 (Read DQS Byte 4 Timing Control)
794 * [13: 8] RdDqsTimeByte5 (Read DQS Byte 5 Timing Control)
796 * [21:16] RdDqsTimeByte6 (Read DQS Byte 6 Timing Control)
798 * [29:24] RdDqsTimeByte7 (Read DQS Byte 7 Timing Control)
801 0x06, 0xc0c0c0c0, 0x00000000,
802 0x26, 0xc0c0c0c0, 0x00000000,
804 /* Read DQS ECC Timing Control Register
806 * [ 5: 0] RdDqsTimeCheck (Read DQS ECC Timing Control)
808 * 000001 = 1/96 MEMCLK delay
809 * 000010 = 2/96 MEMCLK delay
811 * 101111 = 47/96 MEMCLK delay
815 0x07, 0x000000c0, 0x00000000,
816 0x27, 0x000000c0, 0x00000000,
818 /* DQS Receiver Enable Timing Control Register
819 * Index 0x10, 0x13, 0x16, 0x19,
820 * [ 7: 0] Dqs RcvEnDelay (DQS Receiver Enable Delay)
826 * 0xaf-0xff = reserved
829 0x10, 0x000000ff, 0x00000000,
830 0x13, 0x000000ff, 0x00000000,
831 0x16, 0x000000ff, 0x00000000,
832 0x19, 0x000000ff, 0x00000000,
833 0x30, 0x000000ff, 0x00000000,
834 0x33, 0x000000ff, 0x00000000,
835 0x36, 0x000000ff, 0x00000000,
836 0x39, 0x000000ff, 0x00000000,
844 if (!controller_present(ctrl)) {
845 // print_debug("No memory controller present\r\n");
846 sysinfo->ctrl_present[ctrl->node_id] = 0;
850 sysinfo->ctrl_present[ctrl->node_id] = 1;
852 print_spew("setting up CPU");
853 print_spew_hex8(ctrl->node_id);
854 print_spew(" northbridge registers\r\n");
855 max = sizeof(register_values)/sizeof(register_values[0]);
856 for(i = 0; i < max; i += 3) {
860 dev = (register_values[i] & ~0xff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
861 where = register_values[i] & 0xff;
862 reg = pci_read_config32(dev, where);
863 reg &= register_values[i+1];
864 reg |= register_values[i+2];
865 pci_write_config32(dev, where, reg);
870 max = sizeof(index_register_values)/sizeof(index_register_values[0]);
871 for(i = 0; i < max; i += 3) {
874 index = register_values[i];
875 reg = pci_read_config32_index_wait(ctrl->f2, DRAM_CTRL_ADDI_DATA_OFFSET, index);
876 reg &= register_values[i+1];
877 reg |= register_values[i+2];
878 pci_write_config32_index_wait(ctrl->f2, DRAM_CTRL_ADDI_DATA_OFFSET, index, reg);
882 print_spew("done.\r\n");
885 static int is_dual_channel(const struct mem_controller *ctrl)
888 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
889 return dcl & DCL_Width128;
892 static int is_registered(const struct mem_controller *ctrl)
894 /* Test to see if we are dealing with registered SDRAM.
895 * If we are not registered we are unbuffered.
896 * This function must be called after spd_handle_unbuffered_dimms.
899 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
900 return !(dcl & DCL_UnBuffDimm);
903 static void spd_get_dimm_size(unsigned device, struct dimm_size *sz)
905 /* Calculate the log base 2 size of a DIMM in bits */
912 value = spd_read_byte(device, SPD_ROW_NUM); /* rows */
913 if (value < 0) goto hw_err;
914 if ((value & 0xff) == 0) goto val_err; // max is 16 ?
915 sz->per_rank += value & 0xff;
916 sz->rows = value & 0xff;
918 value = spd_read_byte(device, SPD_COL_NUM); /* columns */
919 if (value < 0) goto hw_err;
920 if ((value & 0xff) == 0) goto val_err; //max is 11
921 sz->per_rank += value & 0xff;
922 sz->col = value & 0xff;
924 value = spd_read_byte(device, SPD_BANK_NUM); /* banks */
925 if (value < 0) goto hw_err;
926 if ((value & 0xff) == 0) goto val_err;
927 sz->bank = log2(value & 0xff); // convert 4 to 2, and 8 to 3
928 sz->per_rank += sz->bank;
930 /* Get the module data width and convert it to a power of two */
931 value = spd_read_byte(device, SPD_DATA_WIDTH);
932 if (value < 0) goto hw_err;
934 if ((value != 72) && (value != 64)) goto val_err;
935 sz->per_rank += log2(value) - 3; //64 bit So another 3 lines
937 /* How many ranks? */
938 value = spd_read_byte(device, SPD_MOD_ATTRIB_RANK); /* number of physical banks */
939 if (value < 0) goto hw_err;
940 // value >>= SPD_MOD_ATTRIB_RANK_NUM_SHIFT;
941 value &= SPD_MOD_ATTRIB_RANK_NUM_MASK;
942 value += SPD_MOD_ATTRIB_RANK_NUM_BASE; // 0-->1, 1-->2, 3-->4
944 rank == 1 only one rank or say one side
945 rank == 2 two side , and two ranks
946 rank == 4 two side , and four ranks total
947 Some one side two ranks, because of stacked
949 if ((value != 1) && (value != 2) && (value != 4 )) {
954 /* verify if per_rank is equal byte 31
955 it has the DIMM size as a multiple of 128MB.
957 value = spd_read_byte(device, SPD_RANK_SIZE);
958 if (value < 0) goto hw_err;
961 if(value <=4 ) value += 8; // add back to 1G to high
962 value += (27-5); // make 128MB to the real lines
963 if( value != (sz->per_rank)) {
964 print_err("Bad RANK Size --\r\n");
971 die("Bad SPD value\r\n");
972 /* If an hw_error occurs report that I have no memory */
983 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size *sz, unsigned index, int is_Width128)
985 uint32_t base0, base1;
988 /* For each base register.
989 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
990 * The initialize dimm size is in bits.
991 * Set the base enable bit0.
996 /* Make certain side1 of the dimm is at least 128MB */
997 if (sz->per_rank >= 27) {
998 base0 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
1001 /* Make certain side2 of the dimm is at least 128MB */
1002 if (sz->rank > 1) { // 2 ranks or 4 ranks
1003 base1 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
1006 /* Double the size if we are using dual channel memory */
1008 base0 = (base0 << 1) | (base0 & 1);
1009 base1 = (base1 << 1) | (base1 & 1);
1012 /* Clear the reserved bits */
1013 base0 &= ~0xe007fffe;
1014 base1 &= ~0xe007fffe;
1016 /* Set the appropriate DIMM base address register */
1017 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
1018 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
1019 #if QRANK_DIMM_SUPPORT == 1
1021 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
1022 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
1026 /* Enable the memory clocks for this DIMM by Clear the MemClkDis bit*/
1030 #if CPU_SOCKET_TYPE == 0x10 /* L1 */
1031 ClkDis0 = DTL_MemClkDis0;
1033 #if CPU_SOCKET_TYPE == 0x11 /* AM2 */
1034 ClkDis0 = DTL_MemClkDis0_AM2;
1038 dword = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW); //Channel A
1039 dword &= ~(ClkDis0 >> index);
1040 #if QRANK_DIMM_SUPPORT == 1
1042 dword &= ~(ClkDis0 >> (index+2));
1045 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dword);
1047 if (is_Width128) { //Channel B
1048 dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
1049 dword &= ~(ClkDis0 >> index);
1050 #if QRANK_DIMM_SUPPORT == 1
1052 dword &= ~(ClkDis0 >> (index+2));
1055 pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dword);
1061 /* row col bank for 64 bit
1076 static void set_dimm_cs_map(const struct mem_controller *ctrl, struct dimm_size *sz, unsigned index)
1078 static const uint8_t cs_map_aaa[24] = {
1079 /* (bank=2, row=13, col=9)(3, 16, 11) ---> (0, 0, 0) (1, 3, 2) */
1094 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
1095 map &= ~(0xf << (index * 4));
1096 #if QRANK_DIMM_SUPPORT == 1
1098 map &= ~(0xf << ( (index + 2) * 4));
1102 /* Make certain side1 of the dimm is at least 128MB */
1103 if (sz->per_rank >= 27) {
1105 temp_map = cs_map_aaa[(sz->bank-2)*3*4 + (sz->rows - 13)*3 + (sz->col - 9) ];
1106 map |= temp_map << (index*4);
1107 #if QRANK_DIMM_SUPPORT == 1
1109 map |= temp_map << ( (index + 2) * 4);
1114 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
1118 static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
1122 for(i = 0; i < DIMM_SOCKETS; i++) {
1123 struct dimm_size *sz = &(meminfo->sz[i]);
1124 if (!(dimm_mask & (1 << i))) {
1127 spd_get_dimm_size(ctrl->channel0[i], sz);
1128 if (sz->per_rank == 0) {
1129 return -1; /* Report SPD error */
1131 set_dimm_size(ctrl, sz, i, meminfo->is_Width128);
1132 set_dimm_cs_map (ctrl, sz, i);
1137 static void route_dram_accesses(const struct mem_controller *ctrl,
1138 unsigned long base_k, unsigned long limit_k)
1140 /* Route the addresses to the controller node */
1145 unsigned limit_reg, base_reg;
1148 node_id = ctrl->node_id;
1149 index = (node_id << 3);
1150 limit = (limit_k << 2);
1151 limit &= 0xffff0000;
1152 limit -= 0x00010000;
1153 limit |= ( 0 << 8) | (node_id << 0);
1154 base = (base_k << 2);
1156 base |= (0 << 8) | (1<<1) | (1<<0);
1158 limit_reg = 0x44 + index;
1159 base_reg = 0x40 + index;
1160 for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
1161 pci_write_config32(device, limit_reg, limit);
1162 pci_write_config32(device, base_reg, base);
1166 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
1168 /* Error if I don't have memory */
1173 /* Report the amount of memory. */
1174 print_debug("RAM: 0x");
1175 print_debug_hex32(tom_k);
1176 print_debug(" KB\r\n");
1179 if(tom_k > (4*1024*1024)) {
1180 /* Now set top of memory */
1181 msr.lo = (tom_k & 0x003fffff) << 10;
1182 msr.hi = (tom_k & 0xffc00000) >> 22;
1183 wrmsr(TOP_MEM2, msr);
1186 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
1187 * so I can see my rom chip and other I/O devices.
1189 if (tom_k >= 0x003f0000) {
1190 #if HW_MEM_HOLE_SIZEK != 0
1191 if(hole_startk != 0) {
1192 tom_k = hole_startk;
1197 msr.lo = (tom_k & 0x003fffff) << 10;
1198 msr.hi = (tom_k & 0xffc00000) >> 22;
1199 wrmsr(TOP_MEM, msr);
1202 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl, int is_Width128)
1206 static const uint8_t csbase_low_f0_shift[] = {
1207 /* 128MB */ (14 - (13-5)),
1208 /* 256MB */ (15 - (13-5)),
1209 /* 512MB */ (15 - (13-5)),
1210 /* 512MB */ (16 - (13-5)),
1211 /* 512MB */ (16 - (13-5)),
1212 /* 1GB */ (16 - (13-5)),
1213 /* 1GB */ (16 - (13-5)),
1214 /* 2GB */ (16 - (13-5)),
1215 /* 2GB */ (17 - (13-5)),
1216 /* 4GB */ (17 - (13-5)),
1217 /* 4GB */ (16 - (13-5)),
1218 /* 8GB */ (17 - (13-5)),
1221 /* cs_base_high is not changed */
1223 uint32_t csbase_inc;
1224 int chip_selects, index;
1226 unsigned common_size;
1227 unsigned common_cs_mode;
1228 uint32_t csbase, csmask;
1230 /* See if all of the memory chip selects are the same size
1231 * and if so count them.
1235 common_cs_mode = 0xff;
1236 for(index = 0; index < 8; index++) {
1241 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1243 /* Is it enabled? */
1248 size = (value >> 19) & 0x3ff;
1249 if (common_size == 0) {
1252 /* The size differed fail */
1253 if (common_size != size) {
1257 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
1258 cs_mode =( value >> ((index>>1)*4)) & 0xf;
1259 if(common_cs_mode == 0xff) {
1260 common_cs_mode = cs_mode;
1262 /* The cs_mode differed fail */
1263 if(common_cs_mode != cs_mode) {
1268 /* Chip selects can only be interleaved when there is
1269 * more than one and there is a power of two of them.
1271 bits = log2(chip_selects);
1272 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) { //chip_selects max = 8
1276 /* Find the bits of csbase that we need to interleave on */
1277 csbase_inc = 1 << (csbase_low_f0_shift[common_cs_mode]);
1283 /* Compute the initial values for csbase and csbask.
1284 * In csbase just set the enable bit and the base to zero.
1285 * In csmask set the mask bits for the size and page level interleave.
1288 csmask = (((common_size << bits) - 1) << 19);
1289 csmask |= 0x3fe0 & ~((csbase_inc << bits) - csbase_inc);
1290 for(index = 0; index < 8; index++) {
1293 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1294 /* Is it enabled? */
1298 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1299 if((index & 1) == 0) { //only have 4 CSMASK
1300 pci_write_config32(ctrl->f2, DRAM_CSMASK + ((index>>1) << 2), csmask);
1302 csbase += csbase_inc;
1305 print_debug("Interleaved\r\n");
1307 /* Return the memory size in K */
1308 return common_size << ((27-10) + bits);
/* Sort the enabled chip selects from largest to smallest and pack them
 * at the bottom of the address space (non-interleaved fallback path).
 * Returns the total memory size behind this controller in KB.
 * NOTE(review): several interior lines appear elided in this extract.
 */
1310 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1314 	/* Remember which registers we have used in the high 8 bits of tom */
1317 	/* Find the largest remaining canidate */
1318 	unsigned index, canidate;
1319 	uint32_t csbase, csmask;
1323 	for(index = 0; index < 8; index++) {
1325 	value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1327 	/* Is it enabled? */
1332 	/* Is it greater? */
1333 	if (value <= csbase) {
1337 	/* Has it already been selected */
1338 	if (tom & (1 << (index + 24))) {
1341 	/* I have a new canidate */
1345 	/* See if I have found a new canidate */
1350 	/* Remember the dimm size */
1351 	size = csbase >> 19;
1353 	/* Remember I have used this register */
1354 	tom |= (1 << (canidate + 24));
1356 	/* Recompute the cs base register value */
1357 	csbase = (tom << 19) | 1;
1359 	/* Increment the top of memory */
1362 	/* Compute the memory mask */
1363 	csmask = ((size -1) << 19);
1364 	csmask |= 0x3fe0; /* For now don't optimize */
1366 	/* Write the new base register */
1367 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1368 	/* Write the new mask register */
1369 	if((canidate & 1) == 0) { //only have 4 CSMASK
1370 	pci_write_config32(ctrl->f2, DRAM_CSMASK + ((canidate>>1) << 2), csmask);
1374 	/* Return the memory size in K */
/* tom counts memory in 128MB (1<<27 byte) units in the low 24 bits;
 * shift by (27-10) converts that to KB. */
1375 	return (tom & ~0xff000000) << (27-10);
/* Scan the F1 DRAM base/limit register pairs of the first max_node_id
 * nodes and return the highest mapped DRAM address in KB.
 */
1378 unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1382 	/* Find the last memory address used */
1384 	for(node_id = 0; node_id < max_node_id; node_id++) {
1385 	uint32_t limit, base;
1387 	index = node_id << 3;
1388 	base = pci_read_config32(ctrl->f1, 0x40 + index);
1389 	/* Only look at the limit if the base is enabled */
/* Bits [1:0] of the base register are the RE/WE enable bits. */
1390 	if ((base & 3) == 3) {
1391 	limit = pci_read_config32(ctrl->f1, 0x44 + index);
/* Limit register holds bits [39:24] of the last address; round up
 * to the next 16MB boundary and convert to KB (>> 2 on the [31:16]
 * field == >> 2 after aligning, i.e. units of 256 bytes -> KB). */
1392 	end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
/* Arrange this node's chip selects (interleaved when the CMOS option
 * allows and the population permits, otherwise size-ordered), then
 * program the DRAM base/limit routing and the top-of-memory registers.
 */
1398 static void order_dimms(const struct mem_controller *ctrl, struct mem_info *meminfo)
1400 	unsigned long tom_k, base_k;
1402 	if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
1403 	tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128);
1405 	print_debug("Interleaving disabled\r\n");
/* Fallback: interleave disabled or not possible for this population. */
1409 	tom_k = order_chip_selects(ctrl);
1411 	/* Compute the memory base address */
1412 	base_k = memory_end_k(ctrl, ctrl->node_id);
1414 	route_dram_accesses(ctrl, base_k, tom_k);
1415 	set_top_mem(tom_k, 0);
/* Disable DIMM socket 'index' by zeroing both of its chip-select base
 * registers (plus CS4/CS5 for a quad-rank module) and clearing its bit
 * in dimm_mask.  Returns the updated dimm_mask.
 */
1418 static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask, struct mem_info *meminfo)
1420 	print_debug("disabling dimm");
1421 	print_debug_hex8(index);
1422 	print_debug("\r\n");
/* Each DIMM owns a pair of chip selects: CS(2i) and CS(2i+1). */
1423 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1424 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
1425 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank modules additionally map onto CS(2i+4)/CS(2i+5). */
1426 	if(meminfo->sz[index].rank == 4) {
1427 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), 0);
1428 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), 0);
1432 	dimm_mask &= ~(1 << index);
/* Determine from SPD whether the populated DIMMs are registered or
 * unbuffered, set DCL_UnBuffDimm accordingly and record the result in
 * meminfo->is_registered.  Returns the (possibly updated) dimm_mask.
 * NOTE(review): registered/unbuffered mixing checks appear to be in
 * elided lines — confirm against the full file.
 */
1436 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
1439 	uint32_t registered;
1442 	for(i = 0; (i < DIMM_SOCKETS); i++) {
1444 	if (!(dimm_mask & (1 << i))) {
1447 	value = spd_read_byte(ctrl->channel0[i], SPD_DIMM_TYPE);
1451 	/* Registered dimm ? */
1453 	if ((value == SPD_DIMM_TYPE_RDIMM) || (value == SPD_DIMM_TYPE_mRDIMM)) {
1454 	//check SPD_MOD_ATTRIB to verify it is SPD_MOD_ATTRIB_REGADC (0x11)?
1455 	registered |= (1<<i);
1459 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1460 	dcl &= ~DCL_UnBuffDimm;
1461 	meminfo->is_registered = 1;
1463 	dcl |= DCL_UnBuffDimm;
1464 	meminfo->is_registered = 0;
1466 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1469 	if (meminfo->is_registered) {
1470 	print_debug("Registered\r\n");
1472 	print_debug("Unbuffered\r\n");
/* Probe every SPD address on both channels and build a presence bitmap:
 * bit i = channel0 socket i, bit (i + DIMM_SOCKETS) = channel1 socket i.
 * Only DDR2 SDRAM modules are accepted (this file is DDR2-only).
 */
1478 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
1483 	for(i = 0; i < DIMM_SOCKETS; i++) {
1486 	device = ctrl->channel0[i];
1488 	byte = spd_read_byte(ctrl->channel0[i], SPD_MEM_TYPE); /* Type */
1489 	if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
1490 	dimm_mask |= (1 << i);
1493 	device = ctrl->channel1[i];
1495 	byte = spd_read_byte(ctrl->channel1[i], SPD_MEM_TYPE);
1496 	if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
1497 	dimm_mask |= (1 << (i + DIMM_SOCKETS));
/* Enable 128-bit (dual-channel) operation when every populated socket
 * holds an identical pair of DIMMs on channel0/channel1 and the CPU's
 * northbridge reports 128-bit capability; otherwise fall back to
 * 64-bit mode and drop the channel1 bits from dimm_mask.
 * Returns the updated dimm_mask.
 */
1504 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
1508 	/* SPD addresses to verify are identical */
1509 	static const uint8_t addresses[] = {
1510 	2,	/* Type should be DDR2 SDRAM */
1511 	3,	/* *Row addresses */
1512 	4,	/* *Column addresses */
1513 	5,	/* *Number of DIMM Ranks */
1514 	6,	/* *Module Data Width*/
1515 	9,	/* *Cycle time at highest CAS Latency CL=X */
1516 	11,	/* *DIMM Conf Type */
1517 	13,	/* *Pri SDRAM Width */
1518 	17,	/* *Logical Banks */
1519 	18,	/* *Supported CAS Latencies */
1520 	20,	/* *DIMM Type Info */
1521 	21,	/* *SDRAM Module Attributes */
1522 	23,	/* *Cycle time at CAS Latency (CLX - 1) */
1523 	26,	/* *Cycle time at CAS Latency (CLX - 2) */
1524 	27,	/* *tRP Row precharge time */
1525 	28,	/* *Minimum Row Active to Row Active Delay (tRRD) */
1526 	29,	/* *tRCD RAS to CAS */
1527 	30,	/* *tRAS Activate to Precharge */
1528 	36,	/* *Write recovery time (tWR) */
1529 	37,	/* *Internal write to read command delay (tRDP) */
1530 	38,	/* *Internal read to precharge command delay (tRTP) */
/* BUGFIX: was 41 (duplicated below); per the JEDEC DDR2 SPD layout the
 * tRC/tRFC extension field is byte 40, as the comment already says. */
1531 	40,	/* *Extension of Byte 41 tRC and Byte 42 tRFC */
1532 	41,	/* *Minimum Active to Active/Auto Refresh Time(Trc) */
1533 	42,	/* *Minimum Auto Refresh Command Time(Trfc) */
1535 	/* If the dimms are not in pairs do not do dual channels */
1536 	if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1537 	((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1538 	goto single_channel;
1540 	/* If the cpu is not capable of doing dual channels don't do dual channels */
1541 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1542 	if (!(nbcap & NBCAP_128Bit)) {
1543 	goto single_channel;
1545 	for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1546 	unsigned device0, device1;
1549 	/* If I don't have a dimm skip this one */
1550 	if (!(dimm_mask & (1 << i))) {
1553 	device0 = ctrl->channel0[i];
1554 	device1 = ctrl->channel1[i];
/* Compare the timing-relevant SPD bytes of the paired DIMMs. */
1555 	for(j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
1557 	addr = addresses[j];
1558 	value0 = spd_read_byte(device0, addr);
1562 	value1 = spd_read_byte(device1, addr);
1566 	if (value0 != value1) {
1567 	goto single_channel;
1571 	print_spew("Enabling dual channel memory\r\n");
1573 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1574 	dcl &= ~DCL_BurstLength32;  /* 32byte mode may be preferred in platforms that include graphics controllers that generate a lot of 32-bytes system memory accesses
1575 	32byte mode is not supported when the DRAM interface is 128 bits wides, even 32byte mode is set, system still use 64 byte mode */
1576 	dcl |= DCL_Width128;
1577 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1578 	meminfo->is_Width128 = 1;
/* single_channel fallback: clear the channel1 half of the mask. */
1581 	dimm_mask &= ~((1 << (DIMM_SOCKETS *2)) - (1 << DIMM_SOCKETS));
1582 	meminfo->is_Width128 = 0;
1587 uint16_t cycle_time;
1588 uint8_t divisor; /* In 1/40 ns increments */
1593 uint8_t DcqByPassMax;
1594 uint32_t dch_memclk;
/* Per-memclk parameter table, ordered fastest-last; the terminating
 * entry has cycle_time == 0 so get_mem_param() can stop on it.
 * cycle_time is BCD-ish ns*100 (0x250 == 2.50 ns), divisor is the
 * clock period in 1/40 ns units.
 */
1598 static const struct mem_param speed[] = {
1600 	.name	    = "200Mhz\r\n",
1601 	.cycle_time = 0x500,
1602 	.divisor    = 200, // how many 1/40ns per clock
1603 	.dch_memclk = DCH_MemClkFreq_200MHz, //0
1612 	.name	    = "266Mhz\r\n",
1613 	.cycle_time = 0x375,
1614 	.divisor    = 150, //????
1615 	.dch_memclk = DCH_MemClkFreq_266MHz, //1
1623 	.name	    = "333Mhz\r\n",
1624 	.cycle_time = 0x300,
1626 	.dch_memclk = DCH_MemClkFreq_333MHz, //2
1635 	.name	    = "400Mhz\r\n",
1636 	.cycle_time = 0x250,
1638 	.dch_memclk = DCH_MemClkFreq_400MHz,//3
/* Sentinel entry: cycle_time 0 terminates the table. */
1646 	.cycle_time = 0x000,
/* Select the fastest entry of speed[] whose clock period still meets
 * min_cycle_time.  Dies if min_cycle_time is faster than the fastest
 * supported speed (table exhausted).
 */
1650 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1653 	const struct mem_param *param;
/* Walk until the NEXT entry would be too fast for min_cycle_time. */
1654 	for(param = &speed[0]; param->cycle_time ; param++) {
1655 	if (min_cycle_time > (param+1)->cycle_time) {
1659 	if (!param->cycle_time) {
/* BUGFIX: error message read "to low". */
1660 	die("min_cycle_time too low");
1662 	print_spew(param->name);
1663 #ifdef DRAM_MIN_CYCLE_TIME
1664 	print_debug(param->name);
/* Refine the nominal memclk divisor using the CPU's current FID
 * (read from MSR 0xc0010042), via the dv_a[] lookup table.
 * i selects the memclk column (0=200, 1=266, 2=333, 3=400 MHz);
 * falls back to the nominal divisor when the FID is out of range.
 */
1669 static uint8_t get_exact_divisor(int i, uint8_t divisor)
1671 	//input divisor could be 200(200), 150(266), 120(333), 100 (400)
1672 	static const uint8_t dv_a[] = {
1673 	/* 200  266  333  400 */
1674 	/*4 */	  250, 250, 250, 250,
1675 	/*5 */	  200, 200, 200, 100,
1676 	/*6 */	  200, 166, 166, 100,
1677 	/*7 */	  200, 171, 142, 100,
1679 	/*8 */	  200, 150, 125, 100,
1680 	/*9 */	  200, 156, 133, 100,
1681 	/*10*/	  200, 160, 120, 100,
1682 	/*11*/	  200, 163, 127, 100,
1684 	/*12*/	  200, 150, 133, 100,
1685 	/*13*/	  200, 153, 123, 100,
1686 	/*14*/	  200, 157, 128, 100,
1687 	/*15*/	  200, 160, 120, 100,
1694 	msr = rdmsr(0xc0010042);
/* FIDVID_STATUS: low 6 bits hold the current FID. */
1695 	fid_cur = msr.lo & 0x3f;
1699 	if(index>12) return divisor;
1701 	if(i>3) return divisor;
1703 	return dv_a[index * 4+i];
/* Result bundle for spd_set_memclk(): the chosen speed table entry
 * plus the surviving dimm_mask (member declared in elided lines). */
1707 struct spd_set_memclk_result {
1708 	const struct mem_param *param;
/* Convert an SPD cycle-time byte (BCD-style: upper nibble = ns, lower
 * nibble = tenths, with 10..13 encoding .25/.33/.66/.75) into a linear
 * fixed-point value so cycle times can be compared numerically.
 */
1712 static unsigned convert_to_linear(unsigned value)
1714 	static const unsigned fraction[] = { 0x25, 0x33, 0x66, 0x75 };
1717 	/* We need to convert value to more readable */
1718 	if((value & 0xf) < 10) { //no .25, .33, .66, .75
1721 	valuex = ((value & 0xf0) << 4) | fraction [(value & 0xf)-10];
/* Pick the fastest memory clock and lowest CAS latency that every
 * populated DIMM, the northbridge cap register, and the CMOS
 * max_mem_clock option all support; disable DIMMs that cannot keep up;
 * then program DRAM_CONFIG_HIGH (memclk) and DRAM_TIMING_LOW (Tcl).
 * Returns the chosen mem_param and the surviving dimm_mask
 * (param == NULL / dimm_mask == -1 on hard SPD error).
 */
1727 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
1729 	/* Compute the minimum cycle time for these dimms */
1730 	struct spd_set_memclk_result result;
1731 	unsigned min_cycle_time, min_latency, bios_cycle_time;
/* SPD bytes holding cycle time at CL=X, CL=X-1, CL=X-2. */
1735 	static const uint8_t latency_indicies[] = { 25, 23, 9 };
1737 	static const uint16_t min_cycle_times[] = { // use full speed to compare
1738 	[NBCAP_MEMCLK_NOLIMIT] = 0x250, /*2.5ns */
1739 	[NBCAP_MEMCLK_333MHZ] = 0x300, /* 3.0ns */
1740 	[NBCAP_MEMCLK_266MHZ] = 0x375, /* 3.75ns */
1741 	[NBCAP_MEMCLK_200MHZ] = 0x500, /* 5.0 ns */
1745 	value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1746 	min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1747 	bios_cycle_time = min_cycle_times[
1748 	read_option(CMOS_VSTART_max_mem_clock, CMOS_VLEN_max_mem_clock, 0)];
/* The CMOS option may only slow the clock down, never speed it up. */
1749 	if (bios_cycle_time > min_cycle_time) {
1750 	min_cycle_time = bios_cycle_time;
1754 	print_tx("1 min_cycle_time:", min_cycle_time);
1756 	/* Compute the least latency with the fastest clock supported
1757 	 * by both the memory controller and the dimms.
1759 	for(i = 0; i < DIMM_SOCKETS; i++) {
1760 	int new_cycle_time, new_latency;
1765 	if (!(dimm_mask & (1 << i))) {
1769 	/* First find the supported CAS latencies
1770 	 * Byte 18 for DDR SDRAM is interpreted:
1771 	 * bit 3 == CAS Latency = 3
1772 	 * bit 4 == CAS Latency = 4
1773 	 * bit 5 == CAS Latency = 5
1774 	 * bit 6 == CAS Latency = 6
1776 	new_cycle_time = 0x500;
1779 	latencies = spd_read_byte(ctrl->channel0[i], SPD_CAS_LAT);
1780 	if (latencies <= 0) continue;
1783 	print_tx("\tlatencies:", latencies);
1784 	/* Compute the lowest cas latency supported */
1785 	latency = log2(latencies) - 2;
1787 	/* Loop through and find a fast clock with a low latency */
1788 	for(index = 0; index < 3; index++, latency++) {
1790 	if ((latency < 3) || (latency > 6) ||
1791 	(!(latencies & (1 << latency)))) {
1794 	value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1798 	print_tx("\tindex:", index);
1799 	print_tx("\t\tlatency:", latency);
1800 	print_tx("\t\tvalue1:", value);
1802 	value = convert_to_linear(value);
1804 	print_tx("\t\tvalue2:", value);
1806 	/* Only increase the latency if we decrease the clock */
1807 	if (value >= min_cycle_time ) {
1808 	if(value < new_cycle_time) {
1809 	new_cycle_time = value;
1810 	new_latency = latency;
1811 	} else if (value == new_cycle_time) {
/* Same clock: prefer the smaller CAS latency. */
1812 	if(new_latency > latency) {
1813 	new_latency = latency;
1817 	print_tx("\t\tnew_cycle_time:", new_cycle_time);
1818 	print_tx("\t\tnew_latency:", new_latency);
1821 	if (new_latency > 6){
1824 	/* Does min_latency need to be increased? */
1825 	if (new_cycle_time > min_cycle_time) {
1826 	min_cycle_time = new_cycle_time;
1828 	/* Does min_cycle_time need to be increased? */
1829 	if (new_latency > min_latency) {
1830 	min_latency = new_latency;
1833 	print_tx("2 min_cycle_time:", min_cycle_time);
1834 	print_tx("2 min_latency:", min_latency);
1836 	/* Make a second pass through the dimms and disable
1837 	 * any that cannot support the selected memclk and cas latency.
1840 	print_tx("3 min_cycle_time:", min_cycle_time);
1841 	print_tx("3 min_latency:", min_latency);
1843 	for(i = 0; (i < DIMM_SOCKETS) && (ctrl->channel0[i]); i++) {
1848 	if (!(dimm_mask & (1 << i))) {
1851 	latencies = spd_read_byte(ctrl->channel0[i], SPD_CAS_LAT);
1852 	if (latencies < 0) goto hw_error;
1853 	if (latencies == 0) {
1858 	/* Compute the lowest cas latency supported */
1859 	latency = log2(latencies) -2;
1861 	/* Walk through searching for the selected latency */
1862 	for(index = 0; index < 3; index++, latency++) {
1863 	if (!(latencies & (1 << latency))) {
1866 	if (latency == min_latency)
1869 	/* If I can't find the latency or my index is bad error */
1870 	if ((latency != min_latency) || (index >= 3)) {
1874 	/* Read the min_cycle_time for this latency */
1875 	value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1876 	if (value < 0) goto hw_error;
1878 	value = convert_to_linear(value);
1879 	/* All is good if the selected clock speed
1880 	 * is what I need or slower.
1882 	if (value <= min_cycle_time) {
1885 	/* Otherwise I have an error, disable the dimm */
1887 	dimm_mask = disable_dimm(ctrl, i, dimm_mask, meminfo);
1890 	print_tx("4 min_cycle_time:", min_cycle_time);
1892 	/* Now that I know the minimum cycle time lookup the memory parameters */
1893 	result.param = get_mem_param(min_cycle_time);
1895 	/* Update DRAM Config High with our selected memory speed */
1896 	value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1897 	value &= ~(DCH_MemClkFreq_MASK << DCH_MemClkFreq_SHIFT);
1899 	value |= result.param->dch_memclk << DCH_MemClkFreq_SHIFT;
1900 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1902 	print_debug(result.param->name);
1904 	/* Update DRAM Timing Low with our selected cas latency */
1905 	value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1906 	value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1907 	value |= (min_latency - DTL_TCL_BASE) << DTL_TCL_SHIFT;
1908 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1910 	result.dimm_mask = dimm_mask;
/* hw_error path: signal failure to the caller. */
1913 	result.param = (const struct mem_param *)0;
1914 	result.dimm_mask = -1;
/* Map an SPD fractional-nanosecond extension nibble (low 3 bits) to
 * quarter-ns units via a small lookup table.
 */
1918 static unsigned convert_to_1_4(unsigned value)
1920 	static const uint8_t fraction[] = { 0, 1, 2, 2, 3, 3, 0 };
1923 	/* We need to convert value to more readable */
1924 	valuex = fraction [value & 0x7];
/* Raise the Trc field of DRAM_TIMING_LOW to satisfy DIMM i's SPD tRC
 * (byte 41 plus the byte-40 quarter-ns extension).  Never lowers an
 * already-larger programmed value.  Returns <=0 on SPD/range error.
 */
1927 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1929 	unsigned clocks, old_clocks;
1933 	value = spd_read_byte(ctrl->channel0[i], SPD_TRC);
1934 	if (value < 0) return -1;
/* Byte SPD_TRC-1 (40) carries the fractional-ns extension of tRC. */
1936 	value2 = spd_read_byte(ctrl->channel0[i], SPD_TRC -1);
1938 	value += convert_to_1_4(value2>>4);
/* Round the ns value up to whole memory clocks. */
1942 	clocks = (value + param->divisor - 1)/param->divisor;
1944 	if (clocks < DTL_TRC_MIN) {
1945 	clocks = DTL_TRC_MIN;
1947 	if (clocks > DTL_TRC_MAX) {
1951 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1952 	old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1953 	if (old_clocks >= clocks) { //?? someone did it
1954 	// clocks = old_clocks;
1957 	dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1958 	dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1959 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Program DIMM i's Trfc field in DRAM_TIMING_HIGH from SPD byte 42
 * (with the byte-40 >256ns flag and quarter-ns extension), encoded by
 * device width/density rather than raw clocks.  Never lowers an
 * already-larger value.  Returns <=0 on SPD error.
 */
1963 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i, struct mem_info *meminfo)
1965 	unsigned clocks, old_clocks;
1972 	value = spd_read_byte(ctrl->channel0[i], SPD_TRFC);
1973 	if (value < 0) return -1;
1975 	value2 = spd_read_byte(ctrl->channel0[i], SPD_TRC -1);
/* Bit 0 of byte 40: tRFC exceeds 256 ns. */
1976 	if(value2 & 1) value += 256;
1978 	value += convert_to_1_4(value2>>1);
1981 	value = param->tRFC;
1984 	clocks = (value + param->divisor - 1)/param->divisor;
1986 	//get the cs_size --> logic dimm size
1987 	value = spd_read_byte(ctrl->channel0[i], SPD_PRI_WIDTH);
1992 	value = 6 - log2(value); //4-->4, 8-->3, 16-->2
/* Encode Trfc from per-rank density (DTH_TRFC encoding, 3 bits/DIMM). */
1994 	clocks = meminfo->sz[i].per_rank - 27 + 2 - value;
1996 	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1998 	old_clocks = ((dth >> (DTH_TRFC0_SHIFT+i*3)) & DTH_TRFC_MASK);
1999 	if (old_clocks >= clocks) { // some one did it?
2000 	// clocks = old_clocks;
2003 	dth &= ~(DTH_TRFC_MASK << (DTH_TRFC0_SHIFT+i*3));
2004 	dth |= clocks << (DTH_TRFC0_SHIFT+i*3);
2005 	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Generic helper: read a quarter-ns SPD timing byte (SPD_TT) for DIMM
 * i, convert to clocks, clamp to [TT_MIN, TT_MAX], and raise (never
 * lower) the corresponding field of register TT_REG.
 * Returns <=0 on SPD error.
 */
2009 static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct mem_param *param, int i,
2011 	unsigned SPD_TT, unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX )
2013 	unsigned clocks, old_clocks;
2016 	value = spd_read_byte(ctrl->channel0[i], SPD_TT); //already in 1/4 ns
2017 	if (value < 0) return -1;
2019 	clocks = (value + param->divisor -1)/param->divisor;
2020 	if (clocks < TT_MIN) {
2023 	if (clocks > TT_MAX) {
2026 	dtl = pci_read_config32(ctrl->f2, TT_REG);
2028 	old_clocks = ((dtl >> TT_SHIFT) & TT_MASK) + TT_BASE;
2029 	if (old_clocks >= clocks) { //some one did it?
2030 	// clocks = old_clocks;
2033 	dtl &= ~(TT_MASK << TT_SHIFT);
2034 	dtl |= ((clocks - TT_BASE) << TT_SHIFT);
2035 	pci_write_config32(ctrl->f2, TT_REG, dtl);
/* Trcd (RAS-to-CAS delay): thin wrapper over update_dimm_TT_1_4. */
2039 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
2041 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRCD, DTL_TRCD_SHIFT, DTL_TRCD_MASK, DTL_TRCD_BASE, DTL_TRCD_MIN, DTL_TRCD_MAX);
/* Trrd (activate-to-activate delay): wrapper over update_dimm_TT_1_4. */
2044 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
2046 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRRD, DTL_TRRD_SHIFT, DTL_TRRD_MASK, DTL_TRRD_BASE, DTL_TRRD_MIN, DTL_TRRD_MAX);
/* Tras (activate-to-precharge): SPD byte 30 is in whole ns, so it is
 * scaled to quarter-ns first, then converted to clocks and merged into
 * DRAM_TIMING_LOW (raise-only, clamped to the DTL_TRAS range).
 * Returns <=0 on SPD error.
 */
2049 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
2051 	unsigned clocks, old_clocks;
2054 	value = spd_read_byte(ctrl->channel0[i], SPD_TRAS); //in 1 ns
2055 	if (value < 0) return -1;
2056 	print_tx("update_dimm_Tras: 0 value=", value);
2058 	value<<=2; //convert it to in 1/4ns
2061 	print_tx("update_dimm_Tras: 1 value=", value);
2063 	clocks = (value  + param->divisor - 1)/param->divisor;
2064 	print_tx("update_dimm_Tras: divisor=", param->divisor);
2065 	print_tx("update_dimm_Tras: clocks=", clocks);
2066 	if (clocks < DTL_TRAS_MIN) {
2067 	clocks = DTL_TRAS_MIN;
2069 	if (clocks > DTL_TRAS_MAX) {
2072 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
2073 	old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
2074 	if (old_clocks >= clocks) { // someone did it?
2075 	// clocks = old_clocks;
2078 	dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
2079 	dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
2080 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* Trp (row precharge): thin wrapper over update_dimm_TT_1_4. */
2084 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
2086 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRP, DTL_TRP_SHIFT, DTL_TRP_MASK, DTL_TRP_BASE, DTL_TRP_MIN, DTL_TRP_MAX);
/* Trtp (read-to-precharge): base/min/max are offset by one clock when
 * running a 64-bit interface in 64-byte burst mode (burst length 8),
 * per the burst-length check on DRAM_CONFIG_LOW below.
 */
2089 static int update_dimm_Trtp(const struct mem_controller *ctrl, const struct mem_param *param, int i, struct mem_info *meminfo)
2091 	//need to figure if it is 32 byte burst or 64 bytes burst
2093 	if(!meminfo->is_Width128) {
2095 	dword = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2096 	if((dword &  DCL_BurstLength32)) offset = 0;
2098 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRTP, DTL_TRTP_SHIFT, DTL_TRTP_MASK, DTL_TRTP_BASE+offset, DTL_TRTP_MIN+offset, DTL_TRTP_MAX+offset);
/* Twr (write recovery): thin wrapper over update_dimm_TT_1_4. */
2102 static int update_dimm_Twr(const struct mem_controller *ctrl, const struct mem_param *param, int i)
2104 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TWR, DTL_TWR_SHIFT, DTL_TWR_MASK, DTL_TWR_BASE, DTL_TWR_MIN, DTL_TWR_MAX);
/* Tref (refresh interval): translate SPD byte 12's refresh-rate code
 * into the DTH_TREF field of DRAM_TIMING_HIGH, writing back only when
 * the value actually changes.  Returns <=0 on SPD error.
 */
2108 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
2110 	uint32_t dth, dth_old;
2112 	value = spd_read_byte(ctrl->channel0[i], SPD_TREF); // 0: 15.625us, 1: 3.9us 2: 7.8 us....
2113 	if (value < 0) return -1;
2121 	dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
2124 	dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
2125 	dth |= (value << DTH_TREF_SHIFT);
/* Avoid a redundant PCI config write when nothing changed. */
2126 	if(dth_old != dth) {
2127 	pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* Set DCH_FourRankRDimm in DRAM_CONFIG_HIGH when any populated
 * registered DIMM is quad-rank.  No-op for unbuffered configurations.
 */
2132 static void set_4RankRDimm(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
/* BUGFIX: the guard was misspelled "QRANK_DIMM_SUPPRT"; an undefined
 * identifier evaluates to 0 in #if, so this body was never compiled
 * even with quad-rank support enabled. */
2134 #if QRANK_DIMM_SUPPORT == 1
2139 	if(!(meminfo->is_registered)) return;
2143 	for(i = 0; i < DIMM_SOCKETS; i++) {
2144 	if (!(dimm_mask & (1 << i))) {
/* BUGFIX: sz is a per-socket array (see disable_dimm et al.);
 * "meminfo->sz.rank" would not compile once the #if guard is fixed. */
2148 	if(meminfo->sz[i].rank == 4) {
2156 	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2157 	dch |= DCH_FourRankRDimm;
2158 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Derive auxiliary per-DIMM bitmaps from SPD and meminfo->sz[]:
 * x4/x16 device-width masks (with extra bits for the second CS pair of
 * quad-rank modules), a single-rank mask, and a 1KB-page (10 column
 * bits) mask.  Results are stored back into meminfo for later use by
 * set_RDqsEn/set_dimm_x4/set_misc_timing.
 */
2164 static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl, struct mem_info *meminfo)
2170 	uint32_t mask_single_rank;
2171 	uint32_t mask_page_1k;
2173 #if QRANK_DIMM_SUPPORT == 1
2177 	long dimm_mask = meminfo->dimm_mask;
2182 	mask_single_rank = 0;
2185 	for(i = 0; i < DIMM_SOCKETS; i++) {
2187 	if (!(dimm_mask & (1 << i))) {
2191 	if(meminfo->sz[i].rank == 1) {
2192 	mask_single_rank |= 1<<i;
2195 	if(meminfo->sz[i].col==10) {
2196 	mask_page_1k |= 1<<i;
2200 	value = spd_read_byte(ctrl->channel0[i], SPD_PRI_WIDTH);
2202 #if QRANK_DIMM_SUPPORT == 1
2203 	rank = meminfo->sz[i].rank;
2208 #if QRANK_DIMM_SUPPORT == 1
/* Quad-rank: the upper CS pair mirrors the width attribute. */
2210 	mask_x4 |= 1<<(i+2);
2213 	} else if(value==16) {
2215 #if QRANK_DIMM_SUPPORT == 1
2217 	mask_x16 |= 1<<(i+2);
2224 	meminfo->x4_mask= mask_x4;
2225 	meminfo->x16_mask = mask_x16;
2227 	meminfo->single_rank_mask = mask_single_rank;
2228 	meminfo->page_1k_mask = mask_page_1k;
/* Program the X4Dimm field of DRAM_CONFIG_LOW from the x4 device-width
 * mask computed by get_extra_dimm_mask().
 */
2235 static void set_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2238 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2239 	dcl &= ~(DCL_X4Dimm_MASK<<DCL_X4Dimm_SHIFT);
2240 	dcl |= ((meminfo->x4_mask) & 0xf) << (DCL_X4Dimm_SHIFT);
2241 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count the set bits in the low DIMM_SOCKETS bits of dimm_mask
 * (population count over the sockets of one channel).
 */
2244 static int count_ones(uint32_t dimm_mask)
2249 	for(index = 0; index < DIMM_SOCKETS; index++, dimm_mask>>=1) {
2250 	if (dimm_mask & 1) {
/* Choose the on-die termination (ODT) strength and program the
 * DramTerm field of DRAM_CONFIG_LOW.  At DDR2-800 (divisor 100) with a
 * 128-bit interface, two populated DIMMs per channel need a different
 * ODT value (selection logic partly in elided lines).
 */
2258 static void set_DramTerm(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2264 	if(param->divisor == 100) { //DDR2 800
2265 	if(meminfo->is_Width128) {
2266 	if(count_ones(meminfo->dimm_mask & 0x0f)==2) {
2272 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2273 	dcl &= ~(DCL_DramTerm_MASK<<DCL_DramTerm_SHIFT);
2274 	dcl |= (odt & DCL_DramTerm_MASK) << (DCL_DramTerm_SHIFT);
2275 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Enable DIMM ECC when the northbridge supports it, the CMOS option
 * allows it, and every populated DIMM advertises ECC in SPD; record
 * the outcome in meminfo->is_ecc.
 */
2279 static void set_ecc(const struct mem_controller *ctrl,const struct mem_param *param, long dimm_mask, struct mem_info *meminfo)
2284 	uint32_t dcl, nbcap;
2285 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
2286 	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2287 	dcl &= ~DCL_DimmEccEn;
2288 	if (nbcap & NBCAP_ECC) {
2289 	dcl |= DCL_DimmEccEn;
/* CMOS option can veto ECC even when the hardware supports it. */
2291 	if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
2292 	dcl &= ~DCL_DimmEccEn;
2294 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2296 	meminfo->is_ecc = 1;
2297 	if(!(dcl & DCL_DimmEccEn)) {
2298 	meminfo->is_ecc = 0;
2299 	return; // already disabled the ECC, so don't need to read SPD any more
2302 	for(i = 0; i < DIMM_SOCKETS; i++) {
2304 	if (!(dimm_mask & (1 << i))) {
2308 	value = spd_read_byte(ctrl->channel0[i], SPD_DIMM_CONF_TYPE);
/* One non-ECC DIMM forces ECC off for the whole controller. */
2310 	if(!(value & SPD_DIMM_CONF_TYPE_ECC)) {
2311 	dcl &= ~DCL_DimmEccEn;
2312 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2313 	meminfo->is_ecc = 0;
/* Twtr (write-to-read delay): wrapper over update_dimm_TT_1_4,
 * targeting DRAM_TIMING_HIGH. */
2320 static int update_dimm_Twtr(const struct mem_controller *ctrl, const struct mem_param *param, int i)
2323 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_HIGH, SPD_TWTR, DTH_TWTR_SHIFT, DTH_TWTR_MASK, DTH_TWTR_BASE, DTH_TWTR_MIN, DTH_TWTR_MAX);
/* Generic field writer: validate val against [TT_MIN, TT_MAX] (die on
 * violation) and store (val - TT_BASE) into the TT_SHIFT/TT_MASK field
 * of register TT_REG.  'str' names the timing for the error message
 * (printed in elided lines before die()).
 */
2327 static void set_TT(const struct mem_controller *ctrl, const struct mem_param *param, unsigned TT_REG,
2328 	unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX, unsigned val, const char *str)
2332 	if ((val < TT_MIN) || (val > TT_MAX)) {
2334 	die(" Unknown\r\n");
2337 	reg = pci_read_config32(ctrl->f2, TT_REG);
2338 	reg &= ~(TT_MASK << TT_SHIFT);
2339 	reg |= ((val - TT_BASE) << TT_SHIFT);
2340 	pci_write_config32(ctrl->f2, TT_REG, reg);
/* TrwtTO (read-to-write turnaround): fixed value from the speed table. */
2344 static void set_TrwtTO(const struct mem_controller *ctrl, const struct mem_param *param)
2346 	set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRWTTO_SHIFT, DTH_TRWTTO_MASK,DTH_TRWTTO_BASE, DTH_TRWTTO_MIN, DTH_TRWTTO_MAX, param->TrwtTO, "TrwtTO");
/* Twrrd (write-to-read turnaround): fixed value from the speed table. */
2349 static void set_Twrrd(const struct mem_controller *ctrl, const struct mem_param *param)
2351 	set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRRD_SHIFT, DTH_TWRRD_MASK,DTH_TWRRD_BASE, DTH_TWRRD_MIN, DTH_TWRRD_MAX, param->Twrrd, "Twrrd");
/* Twrwr (write-to-write turnaround): fixed value from the speed table. */
2354 static void set_Twrwr(const struct mem_controller *ctrl, const struct mem_param *param)
2356 	set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRWR_SHIFT, DTH_TWRWR_MASK,DTH_TWRWR_BASE, DTH_TWRWR_MIN, DTH_TWRWR_MAX, param->Twrwr, "Twrwr");
/* Trdrd (read-to-read turnaround): fixed value from the speed table. */
2359 static void set_Trdrd(const struct mem_controller *ctrl, const struct mem_param *param)
2361 	set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRDRD_SHIFT, DTH_TRDRD_MASK,DTH_TRDRD_BASE, DTH_TRDRD_MIN, DTH_TRDRD_MAX, param->Trdrd, "Trdrd");
/* DcqBypassMax: DRAM controller queue bypass limit, from the speed
 * table (comment below suggests it should come from CMOS eventually). */
2364 static void set_DcqBypassMax(const struct mem_controller *ctrl, const struct mem_param *param)
2366 	set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_DcqBypassMax_SHIFT, DCH_DcqBypassMax_MASK,DCH_DcqBypassMax_BASE, DCH_DcqBypassMax_MIN, DCH_DcqBypassMax_MAX, param->DcqByPassMax, "DcqBypassMax"); // value need to be in CMOS
/* Tfaw (four-activate window): chosen by memclk frequency and by
 * whether any populated DIMM uses 1KB pages (smaller window) versus
 * all-2KB pages, then written via set_TT.
 */
2369 static void set_Tfaw(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2371 	static const uint8_t faw_1k[] = {8, 10, 13, 14};
2372 	static const uint8_t faw_2k[] = {10, 14, 17, 18};
2373 	unsigned memclkfreq_index;
/* dch_memclk doubles as an index 0..3 into the faw tables. */
2377 	memclkfreq_index = param->dch_memclk;
2379 	if(meminfo->page_1k_mask != 0) { //1k page
2380 	faw = faw_1k[memclkfreq_index];
2383 	faw = faw_2k[memclkfreq_index];
2386 	set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_FourActWindow_SHIFT, DCH_FourActWindow_MASK, DCH_FourActWindow_BASE, DCH_FourActWindow_MIN, DCH_FourActWindow_MAX, faw, "FourActWindow");
/* Program the MaxAsyncLat field of DRAM_CONFIG_HIGH (async_lat is
 * computed in elided lines).
 */
2391 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2397 	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2398 	dch &= ~(DCH_MaxAsyncLat_MASK << DCH_MaxAsyncLat_SHIFT);
2403 	dch |= ((async_lat - DCH_MaxAsyncLat_BASE) << DCH_MaxAsyncLat_SHIFT);
2404 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Enable 2T addressing (slow access mode) in DRAM_CONFIG_HIGH; the
 * bit set itself is in elided lines between the read and the write. */
2407 static void set_SlowAccessMode(const struct mem_controller *ctrl)
2411 	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2415 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2420 DRAM_OUTPUT_DRV_COMP_CTRL 0, 0x20
2421 DRAM_ADDR_TIMING_CTRL 04, 0x24
/* Program the Output Driver Compensation Control and Address Timing
 * Control indirect registers (F2 index 0x98/0x9c, indices 0/0x20 and
 * 4/0x24).  The dwordx value is chosen per memclk, DIMM population and
 * rank/width mix; the tables differ for registered (DIMM_SUPPORT
 * 0x0104) versus unbuffered (0x0004) builds.  May also request 2T
 * addressing (SlowAccessMode) for heavily loaded unbuffered configs.
 */
2423 static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *meminfo)
2427 	unsigned SlowAccessMode = 0;
/* Only the channel-0 population bits matter here. */
2429 	long dimm_mask = meminfo->dimm_mask & 0x0f;
2431 #if DIMM_SUPPORT==0x0104   /* DDR2 and REG */
2434 	dwordx = 0x002f0000;
2435 	switch (meminfo->memclk_set) {
2436 	case DCH_MemClkFreq_266MHz:
2437 	if( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
2438 	dwordx = 0x002f2700;
2441 	case DCH_MemClkFreq_333MHz:
2442 	if( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
2443 	if ((meminfo->single_rank_mask & 0x03)!=0x03) { //any double rank there?
2444 	dwordx = 0x002f2f00;
2448 	case DCH_MemClkFreq_400MHz:
2449 	dwordx = 0x002f3300;
2455 #if DIMM_SUPPORT==0x0004  /* DDR2 and unbuffered */
2456 	/* for UNBUF DIMM */
2458 	dwordx = 0x002f2f00;
2459 	switch (meminfo->memclk_set) {
2460 	case DCH_MemClkFreq_200MHz:
2461 	if(dimm_mask == 0x03) {
2466 	case DCH_MemClkFreq_266MHz:
2467 	if(dimm_mask == 0x03) {
2470 	if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
/* All devices x8: pick by single/double-rank population. */
2471 	switch (meminfo->single_rank_mask) {
2473 	dwordx = 0x00002f00; //x8 single Rank
2476 	dwordx = 0x00342f00; //x8 double Rank
2479 	dwordx = 0x00372f00; //x8 single Rank and double Rank mixed
2481 	} else if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
2482 	dwordx = 0x00382f00; //x8 Double Rank and x16 single Rank mixed
2483 	} else if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
2484 	dwordx = 0x00382f00; //x16 single Rank and x8 double Rank mixed
2489 	if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x00) && ((meminfo->single_rank_mask == 0x01)||(meminfo->single_rank_mask == 0x02))) { //x8 single rank
2490 	dwordx = 0x002f2f00;
2492 	dwordx = 0x002b2f00;
2496 	case DCH_MemClkFreq_333MHz:
2497 	dwordx = 0x00202220;
2498 	if(dimm_mask == 0x03) {
2501 	if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
2502 	switch (meminfo->single_rank_mask) {
2504 	dwordx = 0x00302220; //x8 single Rank
2507 	dwordx = 0x002b2220; //x8 double Rank
2510 	dwordx = 0x002a2220; //x8 single Rank and double Rank mixed
2512 	} else if((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
2513 	dwordx = 0x002c2220; //x8 Double Rank and x16 single Rank mixed
2514 	} else if((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
2515 	dwordx = 0x002c2220; //x16 single Rank and x8 double Rank mixed
2519 	case DCH_MemClkFreq_400MHz:
2520 	dwordx = 0x00202520;
2522 	if(dimm_mask == 0x03) {
2530 	print_raminit("\tdimm_mask = ", meminfo->dimm_mask);
2531 	print_raminit("\tx4_mask = ", meminfo->x4_mask);
2532 	print_raminit("\tx16_mask = ", meminfo->x16_mask);
2533 	print_raminit("\tsingle_rank_mask = ", meminfo->single_rank_mask);
2534 	print_raminit("\tODC = ", dword);
2535 	print_raminit("\tAddr Timing= ", dwordx);
2538 #if (DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2539 	if(SlowAccessMode) {
2540 	set_SlowAccessMode(ctrl);
2544 	/* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
2545 	pci_write_config32_index_wait(ctrl->f2, 0x98, 0, dword);
/* In 128-bit mode the second (upper) DCT copy must match. */
2546 	if(meminfo->is_Width128) {
2547 	pci_write_config32_index_wait(ctrl->f2, 0x98, 0x20, dword);
2550 	/* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
2551 	pci_write_config32_index_wait(ctrl->f2, 0x98, 4, dwordx);
2552 	if(meminfo->is_Width128) {
2553 	pci_write_config32_index_wait(ctrl->f2, 0x98, 0x24, dwordx);
/* Enable RDQS mode in DRAM_CONFIG_HIGH; only relevant on socket type
 * 0x10 (per the guard) for registered x8 DIMMs — the x4/x16 masks
 * being empty implies all devices are x8.
 */
2559 static void set_RDqsEn(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2561 #if CPU_SOCKET_TYPE==0x10
2562 	//only need to set for reg and x8
2565 	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2568 	if((!meminfo->x4_mask) && (!meminfo->x16_mask)) {
2572 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Hardcode the dynamic idle cycle limit to 16 clocks in
 * DRAM_CTRL_MISC, per AMD recommendation.
 */
2577 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2580 	/* AMD says to Hardcode this */
2581 	dcm = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
2582 	dcm &= ~(DCM_ILD_lmt_MASK << DCM_ILD_lmt_SHIFT);
2583 	dcm |= DCM_ILD_lmt_16 << DCM_ILD_lmt_SHIFT;
2585 	pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
/* Set the read/write queue bypass count field of DRAM_CTRL_MISC to a
 * fixed value (2) via set_TT. */
2588 static void set_RdWrQByp(const struct mem_controller *ctrl, const struct mem_param *param)
2590 	set_TT(ctrl, param, DRAM_CTRL_MISC, DCM_RdWrQByp_SHIFT, DCM_RdWrQByp_MASK,0, 0, 3, 2, "RdWrQByp");
/* Top-level per-DIMM timing pass: for each populated DIMM apply every
 * SPD-derived update_dimm_* adjustment, disabling any DIMM whose SPD
 * read fails (rc <= 0); then program the population-independent
 * controller-wide timing, config and misc registers.
 * Returns the final dimm_mask (also stored in meminfo).
 */
2595 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask, struct mem_info *meminfo)
2599 	for(i = 0; i < DIMM_SOCKETS; i++) {
2601 	if (!(dimm_mask & (1 << i))) {
2604 	print_tx("dimm socket: ", i);
2605 	/* DRAM Timing Low Register */
2606 	print_t("\ttrc\r\n");
2607 	if ((rc = update_dimm_Trc (ctrl, param, i)) <= 0) goto dimm_err;
2609 	print_t("\ttrcd\r\n");
2610 	if ((rc = update_dimm_Trcd(ctrl, param, i)) <= 0) goto dimm_err;
2612 	print_t("\ttrrd\r\n");
2613 	if ((rc = update_dimm_Trrd(ctrl, param, i)) <= 0) goto dimm_err;
2615 	print_t("\ttras\r\n");
2616 	if ((rc = update_dimm_Tras(ctrl, param, i)) <= 0) goto dimm_err;
2618 	print_t("\ttrp\r\n");
2619 	if ((rc = update_dimm_Trp (ctrl, param, i)) <= 0) goto dimm_err;
2621 	print_t("\ttrtp\r\n");
2622 	if ((rc = update_dimm_Trtp(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
2624 	print_t("\ttwr\r\n");
2625 	if ((rc = update_dimm_Twr (ctrl, param, i)) <= 0) goto dimm_err;
2627 	/* DRAM Timing High Register */
2628 	print_t("\ttref\r\n");
2629 	if ((rc = update_dimm_Tref(ctrl, param, i)) <= 0) goto dimm_err;
2631 	print_t("\ttwtr\r\n");
2632 	if ((rc = update_dimm_Twtr(ctrl, param, i)) <= 0) goto dimm_err;
2634 	print_t("\ttrfc\r\n");
2635 	if ((rc = update_dimm_Trfc(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
2637 	/* DRAM Config Low */
/* dimm_err: drop this DIMM and continue with the next socket. */
2644 	dimm_mask = disable_dimm(ctrl, i, dimm_mask, meminfo);
2647 	meminfo->dimm_mask = dimm_mask; // store final dimm_mask
2649 	get_extra_dimm_mask(ctrl, meminfo); // will be used by RDqsEn and dimm_x4
2650 	/* DRAM Timing Low Register */
2652 	/* DRAM Timing High Register */
2653 	set_TrwtTO(ctrl, param);
2654 	set_Twrrd (ctrl, param);
2655 	set_Twrwr (ctrl, param);
2656 	set_Trdrd (ctrl, param);
2658 	set_4RankRDimm(ctrl, param, meminfo);
2660 	/* DRAM Config High */
2661 	set_Tfaw(ctrl, param, meminfo);
2662 	set_DcqBypassMax(ctrl, param);
2663 	set_max_async_latency(ctrl, param);
2664 	set_RDqsEn(ctrl, param, meminfo);
2666 	/* DRAM Config Low */
2667 	set_ecc(ctrl, param, dimm_mask, meminfo);
2668 	set_dimm_x4(ctrl, param, meminfo);
2669 	set_DramTerm(ctrl, param, meminfo);
2671 	/* DRAM Control Misc */
2672 	set_idle_cycle_limit(ctrl, param);
2673 	set_RdWrQByp(ctrl, param);
/* Per-node SPD-based DRAM setup: detect DIMMs, enable dual channel,
 * size the ranks, pick the memory clock, then program all timing and
 * configuration registers via spd_set_dram_timing().  On unrecoverable
 * SPD errors the error path (elided in this listing) resets the board. */
2678 static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
2680 struct spd_set_memclk_result result;
2681 const struct mem_param *param;
2682 struct mem_param paramx;
2683 struct mem_info *meminfo;
2686 if (!sysinfo->ctrl_present[ctrl->node_id]) {
2687 // print_debug("No memory controller present\r\n");
2691 meminfo = &sysinfo->meminfo[ctrl->node_id];
/* BUGFIX: "&paramx" had been mangled into the pilcrow character (U+00B6)
 * by an HTML-entity decode of "&para" — restore the address-of operator
 * here and in the memcpy()/spd_set_dram_timing() calls below. */
2693 print_debug_addr("sdram_set_spd_registers: paramx :", &paramx);
2695 activate_spd_rom(ctrl);
2696 dimm_mask = spd_detect_dimms(ctrl);
2697 if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
2698 print_debug("No memory for this cpu\r\n");
2701 dimm_mask = spd_enable_2channels(ctrl, dimm_mask, meminfo);
2704 dimm_mask = spd_set_ram_size(ctrl , dimm_mask, meminfo);
2707 dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask, meminfo);
2710 result = spd_set_memclk(ctrl, dimm_mask, meminfo);
2711 param = result.param;
2712 dimm_mask = result.dimm_mask;
2716 //store memclk set to sysinfo, incase we need rebuilt param again
2717 meminfo->memclk_set = param->dch_memclk;
/* Work on a writable copy of the selected parameter set. */
2719 memcpy(&paramx, param, sizeof(paramx));
2721 paramx.divisor = get_exact_divisor(param->dch_memclk, paramx.divisor);
2723 dimm_mask = spd_set_dram_timing(ctrl, &paramx , dimm_mask, meminfo); // dimm_mask will be stored to meminfo->dimm_mask
2727 order_dimms(ctrl, meminfo);
2731 /* Unrecoverable error reading SPD data */
2732 print_err("SPD error - reset\r\n");
2737 #define TIMEOUT_LOOPS 300000
2739 #include "raminit_f_dqs.c"
2741 #if HW_MEM_HOLE_SIZEK != 0
/* Hoist the DRAM that would collide with the PCI memory hole on node i
 * up above 4GB: raise the base/limit routing registers of all nodes
 * after i (and node i's own limit) by the hoisted amount on every
 * node's address map, then program node i's DRAM Hole Address Register
 * (F1 offset 0xf0).  carry_over is the hoisted size in KB.
 * NOTE(review): the return statement is elided from this listing —
 * presumably it returns carry_over; confirm against the full source. */
2742 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2745 uint32_t carry_over;
2747 uint32_t base, limit;
/* KB between the hole start and the 4GB boundary. */
2752 carry_over = (4*1024*1024) - hole_startk;
/* Shift every node above i.  Base/limit pairs live at F1 0x40/0x44 +
 * (node << 3); bits [1:0] are the write/read enables — skip entries
 * that are not fully enabled. */
2754 for(ii=controllers - 1;ii>i;ii--) {
2755 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2756 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2759 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
/* carry_over is in KB; << 2 converts to the register's address units
 * (register bits [31:16] hold address bits [39:24]). */
2760 limit += (carry_over << 2 );
2761 base += (carry_over << 2 );
/* The address map is replicated on every node — update all copies. */
2762 for(j = 0; j < controllers; j++) {
2763 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit);
2764 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base );
/* Node i keeps its base but its limit grows by the hoisted amount. */
2767 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2768 limit += (carry_over << 2);
2769 for(j = 0; j < controllers; j++) {
2770 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit);
2773 base = pci_read_config32(dev, 0x40 + (i << 3));
2774 basek = (base & 0xffff0000) >> 2;
2775 if(basek == hole_startk) {
/* Node i starts exactly at the hole: the hole offset would be 0 and
 * the hoist register would overflow, so instead move the node's base
 * register up to the 4GB boundary (new basek = 4*1024*1024 KB). */
2776 //don't need set memhole here, because hole off set will be 0, overflow
2777 //so need to change base reg instead, new basek will be 4*1024*1024
2779 base |= (4*1024*1024)<<2;
2780 for(j = 0; j < controllers; j++) {
2781 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
/* Compose the DRAM Hole Address Register: hole base in [31:24] and the
 * DRAM-side offset in [15:8] (valid/enable bits elided in listing). */
2786 hoist = /* hole start address */
2787 ((hole_startk << 10) & 0xff000000) +
2788 /* hole address to memory controller address */
2789 (((basek + carry_over) >> 6) & 0x0000ff00) +
2792 pci_write_config32(dev, 0xf0, hoist);
/* Establish the hardware memory hole below 4GB: compute the hole start
 * (in KB), optionally adjust it so it does not coincide with a node
 * boundary, locate the single node whose DRAM range spans the hole,
 * hoist that node's overlapping memory via hoist_memory(), and finally
 * reprogram the top-of-memory MSRs. */
2798 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2801 uint32_t hole_startk;
2804 hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
2806 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
2807 //We need to double check if the hole_startk is valid, if it is equal to basek, we need to decrease it some
2809 for(i=0; i<controllers; i++) {
2812 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
/* Bits [1:0] of the base register are the write/read enables; skip
 * address-map entries that are not fully enabled. */
2813 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2816 base_k = (base & 0xffff0000) >> 2;
2817 if(base_k == hole_startk) {
2818 hole_startk -= (base_k - basek_pri)>>1; // decrease mem hole startk to make sure it is on middle of previous node
2819 break; //only one hole
2824 //find node index that need do set hole
2825 for(i=0; i<controllers; i++) {
2826 uint32_t base, limit;
2827 unsigned base_k, limit_k;
2828 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2829 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2832 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
/* Register bits [31:16] hold address bits [39:24]; >> 2 converts the
 * masked value to KB.  The limit is inclusive, hence the +0x00010000. */
2833 base_k = (base & 0xffff0000) >> 2;
2834 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2835 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2837 hoist_memory(controllers, ctrl, hole_startk, i);
2838 end_k = memory_end_k(ctrl, controllers);
2839 set_top_mem(end_k, hole_startk);
2840 break; //only one hole
/* Final DRAM bring-up across all nodes: start the memory clocks, kick
 * off hardware DRAM init (InitDram), wait for init and memory-clear to
 * finish, set up the memory hole and top-of-memory, then run DQS
 * receiver-enable/position training.
 * NOTE(review): this listing is elided — local declarations, several
 * closing braces and some conditional lines are not visible. */
2848 static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
2853 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
/* Per-node flag: CPU is a pre-F2 revision needing the DqsRcvEnTrain
 * workaround (8 = max node count). */
2854 unsigned cpu_f0_f1[8];
2857 print_debug_addr("sdram_enable: tsc0[8]: ", &tsc0[0]);
2861 /* Error if I don't have memory */
2862 if (memory_end_k(ctrl, controllers) == 0) {
2863 die("No memory\r\n");
2866 /* Before enabling memory start the memory clocks */
2867 for(i = 0; i < controllers; i++) {
2869 if (!sysinfo->ctrl_present[ i ])
2871 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2873 // if no memory installed, disabled the interface
2874 if(sysinfo->meminfo[i].dimm_mask==0x00){
2875 dch |= DCH_DisDramInterface;
2876 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
/* Assert MemClkFreqVal to start the memory clocks on this node. */
2880 dch |= DCH_MemClkFreqVal;
2881 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2882 /* address timing and Output driver comp Control */
2883 set_misc_timing(ctrl+i, sysinfo->meminfo+i );
2887 /* We need to wait a mimmium of 20 MEMCLKS to enable the InitDram */
2888 memreset(controllers, ctrl);
/* Second pass: trigger hardware DRAM initialization on each node. */
2890 for(i = 0; i < controllers; i++) {
2892 if (!sysinfo->ctrl_present[ i ])
2894 /* Skip everything if I don't have any memory on this controller */
2895 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2896 if (!(dch & DCH_MemClkFreqVal)) {
2901 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2902 if (dcl & DCL_DimmEccEn) {
2904 print_spew("ECC enabled\r\n");
2905 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
/* 128-bit width implies chipkill ECC is possible — enable it. */
2907 if (dcl & DCL_Width128) {
2908 mnc |= MNC_CHIPKILL_EN;
2910 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
2913 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
2914 cpu_f0_f1[i] = is_cpu_pre_f2_in_bsp(i);
2916 //Rev F0/F1 workaround
2918 /* Set the DqsRcvEnTrain bit */
2919 dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
2920 dword |= DC_DqsRcvEnTrain;
2921 pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
2928 /* Set the DqsRcvEnTrain bit */
2929 dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
2930 dword |= DC_DqsRcvEnTrain;
2931 pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
/* Start hardware DRAM init: write dcl once, then again with InitDram
 * set, per the programming sequence. */
2934 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2935 dcl |= DCL_InitDram;
2936 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
/* Third pass: wait for each node's init and memory clear to complete. */
2940 for(i = 0; i < controllers; i++) {
2941 uint32_t dcl, dch, dcm;
2942 if (!sysinfo->ctrl_present[ i ])
2944 /* Skip everything if I don't have any memory on this controller */
2945 if(sysinfo->meminfo[i].dimm_mask==0x00) continue;
2947 print_debug("Initializing memory: ");
/* Poll until hardware clears DCL_InitDram, bounded by TIMEOUT_LOOPS. */
2950 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2952 if ((loops & 1023) == 0) {
2955 } while(((dcl & DCL_InitDram) != 0) && (loops < TIMEOUT_LOOPS));
2956 if (loops >= TIMEOUT_LOOPS) {
2957 print_debug(" failed\r\n");
2961 /* Wait until it is safe to touch memory */
2963 dcm = pci_read_config32(ctrl[i].f2, DRAM_CTRL_MISC);
2964 } while(((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ );
2966 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
/* Record how long init took (TSC delta) for the workaround's later
 * DQS training pass; guard against low-word borrow before subtracting. */
2970 print_debug_dqs_tsc("\r\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
2971 print_debug_dqs_tsc("end tsc ", i, tsc.hi, tsc.lo, 2);
2973 if(tsc.lo<tsc0[i].lo) {
2976 tsc.lo -= tsc0[i].lo;
2977 tsc.hi -= tsc0[i].hi;
2979 tsc0[i].lo = tsc.lo;
2980 tsc0[i].hi = tsc.hi;
2982 print_debug_dqs_tsc(" dtsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
2985 print_debug(" done\r\n");
2988 #if HW_MEM_HOLE_SIZEK != 0
2989 // init hw mem hole here
2990 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
2991 set_hw_mem_hole(controllers, ctrl);
2994 //store tom to sysinfo, and it will be used by dqs_timing
/* TOP_MEM/TOP_MEM2 MSRs hold byte addresses; convert to KB
 * ((hi<<24 | lo>>8) >> 2). */
2998 msr = rdmsr(TOP_MEM);
2999 sysinfo->tom_k = ((msr.hi<<24) | (msr.lo>>8))>>2;
3002 msr = rdmsr(TOP_MEM2);
3003 sysinfo->tom2_k = ((msr.hi<<24)| (msr.lo>>8))>>2;
/* Mark every node untrained before DQS training runs. */
3006 for(i = 0; i < controllers; i++) {
3007 sysinfo->mem_trained[i] = 0;
/* MEM_TRAIN_SEQ == 0: train all nodes in one call from the BSP. */
3010 #if MEM_TRAIN_SEQ == 0
3011 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3012 dqs_timing(controllers, ctrl, tsc0, sysinfo);
3014 dqs_timing(controllers, ctrl, sysinfo);
3018 #if MEM_TRAIN_SEQ == 2
3019 //need to enable mtrr, so dqs training could access the test address
3020 setup_mtrr_dqs(sysinfo->tom_k, sysinfo->tom2_k);
/* MEM_TRAIN_SEQ 1/2: train node by node. */
3023 for(i = 0; i < controllers; i++) {
3024 if (!sysinfo->ctrl_present[ i ])
3027 /* Skip everything if I don't have any memory on this controller */
3028 if(sysinfo->meminfo[i].dimm_mask==0x00) continue;
3030 dqs_timing(i, ctrl, sysinfo, 1);
3032 #if MEM_TRAIN_SEQ == 1
3033 break; // only train the first node with ram
3037 #if MEM_TRAIN_SEQ == 2
3038 clear_mtrr_dqs(sysinfo->tom2_k);
3045 static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
3049 struct mem_controller *ctrl;
3050 for(i=0;i<controllers; i++) {
3053 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
3054 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
3055 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
3056 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
3058 if(spd_addr == (void *)0) continue;
3060 for(j=0;j<DIMM_SOCKETS;j++) {
3061 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
3062 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];