2 * This file is part of the coreboot project.
4 * Copyright (C) 2002 Linux Networx
5 * (Written by Eric Biederman <ebiederman@lnxi.com> for Linux Networx)
6 * Copyright (C) 2004 YingHai Lu
7 * Copyright (C) 2008 Advanced Micro Devices, Inc.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 #include <cpu/x86/cache.h>
24 #include <cpu/x86/mtrr.h>
25 #include <cpu/x86/tsc.h>
31 #if CONFIG_HAVE_OPTION_TABLE
32 #include "option_table.h"
35 #if CONFIG_DEBUG_RAM_SETUP
36 #define printk_raminit(args...) printk(BIOS_DEBUG, args)
38 #define printk_raminit(args...)
42 #if (CONFIG_RAMTOP & (CONFIG_RAMTOP -1)) != 0
43 # error "CONFIG_RAMTOP must be a power of 2"
49 /* for PCI_ADDR(0, 0x18, 2, 0x98) index,
50 and PCI_ADDR(0x, 0x18, 2, 0x9c) data */
53 [29: 0] DctOffset (Dram Controller Offset)
54 [30:30] DctAccessWrite (Dram Controller Read/Write Select)
57 [31:31] DctAccessDone (Dram Controller Access Done)
58 0 = Access in progress
59 1 = No access is in progress
62 [31: 0] DctOffsetData (Dram Controller Offset Data)
65 - Write the register num to DctOffset with DctAccessWrite = 0
67 - poll the DctAccessDone until it = 1
68 - Read the data from DctOffsetData
70 - Write the data to DctOffsetData
71 - Write register num to DctOffset with DctAccessWrite = 1
72 - poll the DctAccessDone until it = 1
76 void setup_resource_map(const unsigned int *register_values, int max)
79 for (i = 0; i < max; i += 3) {
83 dev = register_values[i] & ~0xff;
84 where = register_values[i] & 0xff;
85 reg = pci_read_config32(dev, where);
86 reg &= register_values[i+1];
87 reg |= register_values[i+2];
88 pci_write_config32(dev, where, reg);
92 static int controller_present(const struct mem_controller *ctrl)
94 return pci_read_config32(ctrl->f0, 0) == 0x11001022;
97 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
99 static const unsigned int register_values[] = {
101 /* Careful set limit registers before base registers which
102 contain the enables */
103 /* DRAM Limit i Registers
112 * [ 2: 0] Destination Node ID
122 * [10: 8] Interleave select
123 * specifies the values of A[14:12] to use with interleave enable.
125 * [31:16] DRAM Limit Address i Bits 39-24
126 * This field defines the upper address bits of a 40 bit address
127 * that define the end of the DRAM region.
129 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
130 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
131 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
132 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
133 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
134 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
135 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
136 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
137 /* DRAM Base i Registers
146 * [ 0: 0] Read Enable
149 * [ 1: 1] Write Enable
150 * 0 = Writes Disabled
153 * [10: 8] Interleave Enable
154 * 000 = No interleave
155 * 001 = Interleave on A[12] (2 nodes)
157 * 011 = Interleave on A[12] and A[14] (4 nodes)
161 * 111 = Interleve on A[12] and A[13] and A[14] (8 nodes)
163 * [13:16] DRAM Base Address i Bits 39-24
164 * This field defines the upper address bits of a 40-bit address
165 * that define the start of the DRAM region.
167 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
168 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
169 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
170 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
171 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
172 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
173 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
174 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
176 /* DRAM CS Base Address i Registers
185 * [ 0: 0] Chip-Select Bank Enable
189 * [ 2: 2] Memory Test Failed
191 * [13: 5] Base Address (21-13)
192 * An optimization used when all DIMM are the same size...
194 * [28:19] Base Address (36-27)
195 * This field defines the top 11 addresses bit of a 40-bit
196 * address that define the memory address space. These
197 * bits decode 32-MByte blocks of memory.
200 PCI_ADDR(0, 0x18, 2, 0x40), 0xe007c018, 0x00000000,
201 PCI_ADDR(0, 0x18, 2, 0x44), 0xe007c018, 0x00000000,
202 PCI_ADDR(0, 0x18, 2, 0x48), 0xe007c018, 0x00000000,
203 PCI_ADDR(0, 0x18, 2, 0x4C), 0xe007c018, 0x00000000,
204 PCI_ADDR(0, 0x18, 2, 0x50), 0xe007c018, 0x00000000,
205 PCI_ADDR(0, 0x18, 2, 0x54), 0xe007c018, 0x00000000,
206 PCI_ADDR(0, 0x18, 2, 0x58), 0xe007c018, 0x00000000,
207 PCI_ADDR(0, 0x18, 2, 0x5C), 0xe007c018, 0x00000000,
208 /* DRAM CS Mask Address i Registers
213 * Select bits to exclude from comparison with the DRAM Base address register.
215 * [13: 5] Address Mask (21-13)
216 * Address to be excluded from the optimized case
218 * [28:19] Address Mask (36-27)
219 * The bits with an address mask of 1 are excluded from address comparison
223 PCI_ADDR(0, 0x18, 2, 0x60), 0xe007c01f, 0x00000000,
224 PCI_ADDR(0, 0x18, 2, 0x64), 0xe007c01f, 0x00000000,
225 PCI_ADDR(0, 0x18, 2, 0x68), 0xe007c01f, 0x00000000,
226 PCI_ADDR(0, 0x18, 2, 0x6C), 0xe007c01f, 0x00000000,
228 /* DRAM Control Register
230 * [ 3: 0] RdPtrInit ( Read Pointer Initial Value)
231 * 0x03-0x00: reserved
232 * [ 6: 4] RdPadRcvFifoDly (Read Delay from Pad Receive FIFO)
235 * 010 = 1.5 Memory Clocks
236 * 011 = 2 Memory Clocks
237 * 100 = 2.5 Memory Clocks
238 * 101 = 3 Memory Clocks
239 * 110 = 3.5 Memory Clocks
242 * [16:16] AltVidC3MemClkTriEn (AltVID Memory Clock Tristate Enable)
243 * Enables the DDR memory clocks to be tristated when alternate VID
244 * mode is enabled. This bit has no effect if the DisNbClkRamp bit
246 * [17:17] DllTempAdjTime (DLL Temperature Adjust Cycle Time)
249 * [18:18] DqsRcvEnTrain (DQS Receiver Enable Training Mode)
250 * 0 = Normal DQS Receiver enable operation
251 * 1 = DQS receiver enable training mode
254 PCI_ADDR(0, 0x18, 2, 0x78), 0xfff80000, (6<<4)|(6<<0),
256 /* DRAM Initialization Register
258 * [15: 0] MrsAddress (Address for MRS/EMRS Commands)
259 * this field specifies the dsata driven on the DRAM address pins
260 * 15-0 for MRS and EMRS commands
261 * [18:16] MrsBank (Bank Address for MRS/EMRS Commands)
262 * this files specifies the data driven on the DRAM bank pins for
263 * the MRS and EMRS commands
265 * [24:24] SendPchgAll (Send Precharge All Command)
266 * Setting this bit causes the DRAM controller to send a precharge
267 * all command. This bit is cleared by the hardware after the
269 * [25:25] SendAutoRefresh (Send Auto Refresh Command)
270 * Setting this bit causes the DRAM controller to send an auto
271 * refresh command. This bit is cleared by the hardware after the
273 * [26:26] SendMrsCmd (Send MRS/EMRS Command)
274 * Setting this bit causes the DRAM controller to send the MRS or
275 * EMRS command defined by the MrsAddress and MrsBank fields. This
276 * bit is cleared by the hardware adter the commmand completes
277 * [27:27] DeassertMemRstX (De-assert Memory Reset)
278 * Setting this bit causes the DRAM controller to de-assert the
279 * memory reset pin. This bit cannot be used to assert the memory
281 * [28:28] AssertCke (Assert CKE)
282 * setting this bit causes the DRAM controller to assert the CKE
283 * pins. This bit cannot be used to de-assert the CKE pins
285 * [31:31] EnDramInit (Enable DRAM Initialization)
286 * Setting this bit puts the DRAM controller in a BIOS controlled
287 * DRAM initialization mode. BIOS must clear this bit aster DRAM
288 * initialization is complete.
290 // PCI_ADDR(0, 0x18, 2, 0x7C), 0x60f80000, 0,
293 /* DRAM Bank Address Mapping Register
295 * Specify the memory module size
315 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff0000, 0x00000000,
316 /* DRAM Timing Low Register
318 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
328 * [ 5: 4] Trcd (Ras#-active to Cas# read/write delay)
334 * [ 9: 8] Trp (Row Precharge Time, Precharge-to-Active or Auto-Refresh)
340 * [11:11] Trtp (Read to Precharge Time, read Cas# to precharge time)
341 * 0 = 2 clocks for Burst Length of 32 Bytes
342 * 4 clocks for Burst Length of 64 Bytes
343 * 1 = 3 clocks for Burst Length of 32 Bytes
344 * 5 clocks for Burst Length of 64 Bytes
345 * [15:12] Tras (Minimum Ras# Active Time)
348 * 0010 = 5 bus clocks
350 * 1111 = 18 bus clocks
351 * [19:16] Trc (Row Cycle Time, Ras#-active to Ras#-active or auto
352 * refresh of the same bank)
353 * 0000 = 11 bus clocks
354 * 0010 = 12 bus clocks
356 * 1110 = 25 bus clocks
357 * 1111 = 26 bus clocks
358 * [21:20] Twr (Write Recovery Time, From the last data to precharge,
359 * writes can go back-to-back)
364 * [23:22] Trrd (Active-to-active(Ras#-to-Ras#) Delay of different banks)
369 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel A,
370 * BIOS should set it to reduce the power consumption)
371 * Bit F(1207) M2 Package S1g1 Package
373 * 1 N/A MA0_CLK1 MA0_CLK1
376 * 4 MA1_CLK MA1_CLK0 N/A
377 * 5 MA0_CLK MA0_CLK0 MA0_CLK0
379 * 7 N/A MA0_CLK2 MA0_CLK2
381 PCI_ADDR(0, 0x18, 2, 0x88), 0x000004c8, 0xff000002 /* 0x03623125 */ ,
382 /* DRAM Timing High Register
385 * [ 6: 4] TrwtTO (Read-to-Write Turnaround for Data, DQS Contention)
395 * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay,
396 * minium write-to-read delay when both access the same chip select)
401 * [11:10] Twrrd (Write to Read DIMM Termination Turnaround, minimum
402 * write-to-read delay when accessing two different DIMMs)
407 * [13:12] Twrwr (Write to Write Timing)
408 * 00 = 1 bus clocks ( 0 idle cycle on the bus)
409 * 01 = 2 bus clocks ( 1 idle cycle on the bus)
410 * 10 = 3 bus clocks ( 2 idle cycles on the bus)
412 * [15:14] Trdrd ( Read to Read Timing)
413 * 00 = 2 bus clocks ( 1 idle cycle on the bus)
414 * 01 = 3 bus clocks ( 2 idle cycles on the bus)
415 * 10 = 4 bus clocks ( 3 idle cycles on the bus)
416 * 11 = 5 bus clocks ( 4 idel cycles on the bus)
417 * [17:16] Tref (Refresh Rate)
418 * 00 = Undefined behavior
420 * 10 = Refresh interval of 7.8 microseconds
421 * 11 = Refresh interval of 3.9 microseconds
423 * [22:20] Trfc0 ( Auto-Refresh Row Cycle Time for the Logical DIMM0,
424 * based on DRAM density and speed)
425 * 000 = 75 ns (all speeds, 256Mbit)
426 * 001 = 105 ns (all speeds, 512Mbit)
427 * 010 = 127.5 ns (all speeds, 1Gbit)
428 * 011 = 195 ns (all speeds, 2Gbit)
429 * 100 = 327.5 ns (all speeds, 4Gbit)
433 * [25:23] Trfc1 ( Auto-Refresh Row Cycle Time for the Logical DIMM1,
434 * based on DRAM density and speed)
435 * [28:26] Trfc2 ( Auto-Refresh Row Cycle Time for the Logical DIMM2,
436 * based on DRAM density and speed)
437 * [31:29] Trfc3 ( Auto-Refresh Row Cycle Time for the Logical DIMM3,
438 * based on DRAM density and speed)
440 PCI_ADDR(0, 0x18, 2, 0x8c), 0x000c008f, (2 << 16)|(1 << 8),
441 /* DRAM Config Low Register
443 * [ 0: 0] InitDram (Initialize DRAM)
444 * 1 = write 1 cause DRAM controller to execute the DRAM
445 * initialization, when done it read to 0
446 * [ 1: 1] ExitSelfRef ( Exit Self Refresh Command )
447 * 1 = write 1 causes the DRAM controller to bring the DRAMs out
448 * for self refresh mode
450 * [ 5: 4] DramTerm (DRAM Termination)
451 * 00 = On die termination disabled
456 * [ 7: 7] DramDrvWeak ( DRAM Drivers Weak Mode)
457 * 0 = Normal drive strength mode.
458 * 1 = Weak drive strength mode
459 * [ 8: 8] ParEn (Parity Enable)
460 * 1 = Enable address parity computation output, PAR,
461 * and enables the parity error input, ERR
462 * [ 9: 9] SelfRefRateEn (Faster Self Refresh Rate Enable)
463 * 1 = Enable high temperature ( two times normal )
465 * [10:10] BurstLength32 ( DRAM Burst Length Set for 32 Bytes)
468 * [11:11] Width128 ( Width of DRAM interface)
469 * 0 = the controller DRAM interface is 64-bits wide
470 * 1 = the controller DRAM interface is 128-bits wide
471 * [12:12] X4Dimm (DIMM 0 is x4)
472 * [13:13] X4Dimm (DIMM 1 is x4)
473 * [14:14] X4Dimm (DIMM 2 is x4)
474 * [15:15] X4Dimm (DIMM 3 is x4)
476 * 1 = x4 DIMM present
477 * [16:16] UnBuffDimm ( Unbuffered DIMMs)
479 * 1 = Unbuffered DIMMs
481 * [19:19] DimmEccEn ( DIMM ECC Enable )
482 * 1 = ECC checking is being enabled for all DIMMs on the DRAM
483 * controller ( Through F3 0x44[EccEn])
486 PCI_ADDR(0, 0x18, 2, 0x90), 0xfff6004c, 0x00000010,
487 /* DRAM Config High Register
489 * [ 0: 2] MemClkFreq ( Memory Clock Frequency)
495 * [ 3: 3] MemClkFreqVal (Memory Clock Freqency Valid)
496 * 1 = BIOS need to set the bit when setting up MemClkFreq to
498 * [ 7: 4] MaxAsyncLat ( Maximum Asynchronous Latency)
503 * [12:12] RDqsEn ( Read DQS Enable) This bit is only be set if x8
504 * registered DIMMs are present in the system
505 * 0 = DM pins function as data mask pins
506 * 1 = DM pins function as read DQS pins
508 * [14:14] DisDramInterface ( Disable the DRAM interface ) When this bit
509 * is set, the DRAM controller is disabled, and interface in low power
511 * 0 = Enabled (default)
513 * [15:15] PowerDownEn ( Power Down Mode Enable )
514 * 0 = Disabled (default)
516 * [16:16] PowerDown ( Power Down Mode )
517 * 0 = Channel CKE Control
518 * 1 = Chip Select CKE Control
519 * [17:17] FourRankSODimm (Four Rank SO-DIMM)
520 * 1 = this bit is set by BIOS to indicate that a four rank
522 * [18:18] FourRankRDimm (Four Rank Registered DIMM)
523 * 1 = this bit is set by BIOS to indicate that a four rank
524 * registered DIMM is present
526 * [20:20] SlowAccessMode (Slow Access Mode (2T Mode))
527 * 0 = DRAM address and control signals are driven for one
529 * 1 = One additional MEMCLK of setup time is provided on all
530 * DRAM address and control signals except CS, CKE, and ODT;
531 * i.e., these signals are drivern for two MEMCLK cycles
534 * [22:22] BankSwizzleMode ( Bank Swizzle Mode),
535 * 0 = Disabled (default)
538 * [27:24] DcqBypassMax ( DRAM Controller Queue Bypass Maximum)
539 * 0000 = No bypass; the oldest request is never bypassed
540 * 0001 = The oldest request may be bypassed no more than 1 time
542 * 1111 = The oldest request may be bypassed no more than 15\
544 * [31:28] FourActWindow ( Four Bank Activate Window) , not more than
545 * 4 banks in a 8 bank device are activated
546 * 0000 = No tFAW window restriction
547 * 0001 = 8 MEMCLK cycles
548 * 0010 = 9 MEMCLK cycles
550 * 1101 = 20 MEMCLK cycles
553 PCI_ADDR(0, 0x18, 2, 0x94), 0x00a82f00,0x00008000,
554 /* DRAM Delay Line Register
556 * [ 0: 0] MemClrStatus (Memory Clear Status) : Readonly
557 * when set, this bit indicates that the memory clear function
558 * is complete. Only clear by reset. BIOS should not write or
559 * read the DRAM until this bit is set by hardware
560 * [ 1: 1] DisableJitter ( Disable Jitter)
561 * When set the DDR compensation circuit will not change the
562 * values unless the change is more than one step from the
564 * [ 3: 2] RdWrQByp ( Read/Write Queue Bypass Count)
569 * [ 4: 4] Mode64BitMux (Mismatched DIMM Support Enable)
570 * 1 When bit enables support for mismatched DIMMs when using
571 * 128-bit DRAM interface, the Width128 no effect, only for
573 * [ 5: 5] DCC_EN ( Dynamica Idle Cycle Counter Enable)
574 * When set to 1, indicates that each entry in the page tables
575 * dynamically adjusts the idle cycle limit based on page
576 * Conflict/Page Miss (PC/PM) traffic
577 * [ 8: 6] ILD_lmt ( Idle Cycle Limit)
586 * [ 9: 9] DramEnabled ( DRAM Enabled)
587 * When Set, this bit indicates that the DRAM is enabled, this
588 * bit is set by hardware after DRAM initialization or on an exit
589 * from self refresh. The DRAM controller is intialized after the
590 * hardware-controlled initialization process ( initiated by the
591 * F2 0x90[DramInit]) completes or when the BIOS-controlled
592 * initialization process completes (F2 0x7c(EnDramInit] is
593 * written from 1 to 0)
595 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel B,
596 * BIOS should set it to reduce the power consumption)
597 * Bit F(1207) M2 Package S1g1 Package
599 * 1 N/A MA0_CLK1 MA0_CLK1
602 * 4 MA1_CLK MA1_CLK0 N/A
603 * 5 MA0_CLK MA0_CLK0 MA0_CLK0
605 * 7 N/A MA0_CLK2 MA0_CLK2
607 PCI_ADDR(0, 0x18, 2, 0xa0), 0x00fffc00, 0xff000000,
609 /* DRAM Scrub Control Register
611 * [ 4: 0] DRAM Scrube Rate
613 * [12: 8] L2 Scrub Rate
615 * [20:16] Dcache Scrub
618 * 00000 = Do not scrub
640 * All Others = Reserved
642 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
643 /* DRAM Scrub Address Low Register
645 * [ 0: 0] DRAM Scrubber Redirect Enable
647 * 1 = Scrubber Corrects errors found in normal operation
649 * [31: 6] DRAM Scrub Address 31-6
651 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
652 /* DRAM Scrub Address High Register
654 * [ 7: 0] DRAM Scrubb Address 39-32
657 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
659 /* for PCI_ADDR(0, 0x18, 2, 0x98) index,
660 and PCI_ADDR(0x, 0x18, 2, 0x9c) data */
663 [29: 0] DctOffset (Dram Controller Offset)
664 [30:30] DctAccessWrite (Dram Controller Read/Write Select)
667 [31:31] DctAccessDone (Dram Controller Access Done)
668 0 = Access in progress
669 1 = No access is progress
672 [31: 0] DctOffsetData (Dram Controller Offset Data)
675 - Write the register num to DctOffset with DctAccessWrite = 0
676 - poll the DctAccessDone until it = 1
677 - Read the data from DctOffsetData
679 - Write the data to DctOffsetData
680 - Write register num to DctOffset with DctAccessWrite = 1
681 - poll the DctAccessDone untio it = 1
687 if (!controller_present(ctrl)) {
688 sysinfo->ctrl_present[ctrl->node_id] = 0;
691 sysinfo->ctrl_present[ctrl->node_id] = 1;
693 printk(BIOS_SPEW, "setting up CPU %02x northbridge registers\n", ctrl->node_id);
694 max = ARRAY_SIZE(register_values);
695 for (i = 0; i < max; i += 3) {
699 dev = (register_values[i] & ~0xff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
700 where = register_values[i] & 0xff;
701 reg = pci_read_config32(dev, where);
702 reg &= register_values[i+1];
703 reg |= register_values[i+2];
704 pci_write_config32(dev, where, reg);
706 printk(BIOS_SPEW, "done.\n");
710 static int is_dual_channel(const struct mem_controller *ctrl)
713 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
714 return dcl & DCL_Width128;
718 static int is_opteron(const struct mem_controller *ctrl)
720 /* Test to see if I am an Opteron. M2 and S1G1 support dual
721 * channel, too, but only support unbuffered DIMMs so we need a
722 * better test for Opterons.
723 * However, all code uses is_opteron() to find out whether to
724 * use dual channel, so if we really check for opteron here, we
725 * need to fix up all code using this function, too.
729 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
730 return !!(nbcap & NBCAP_128Bit);
734 static int is_registered(const struct mem_controller *ctrl)
736 /* Test to see if we are dealing with registered SDRAM.
737 * If we are not registered we are unbuffered.
738 * This function must be called after spd_handle_unbuffered_dimms.
741 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
742 return !(dcl & DCL_UnBuffDimm);
746 static void spd_get_dimm_size(unsigned device, struct dimm_size *sz)
748 /* Calculate the log base 2 size of a DIMM in bits */
755 value = spd_read_byte(device, SPD_ROW_NUM); /* rows */
756 if (value < 0) goto hw_err;
757 if ((value & 0xff) == 0) goto val_err; /* max is 16 ? */
758 sz->per_rank += value & 0xff;
759 sz->rows = value & 0xff;
761 value = spd_read_byte(device, SPD_COL_NUM); /* columns */
762 if (value < 0) goto hw_err;
763 if ((value & 0xff) == 0) goto val_err; /* max is 11 */
764 sz->per_rank += value & 0xff;
765 sz->col = value & 0xff;
767 value = spd_read_byte(device, SPD_BANK_NUM); /* banks */
768 if (value < 0) goto hw_err;
769 if ((value & 0xff) == 0) goto val_err;
770 sz->bank = log2(value & 0xff); // convert 4 to 2, and 8 to 3
771 sz->per_rank += sz->bank;
773 /* Get the module data width and convert it to a power of two */
774 value = spd_read_byte(device, SPD_DATA_WIDTH);
775 if (value < 0) goto hw_err;
777 if ((value != 72) && (value != 64)) goto val_err;
778 sz->per_rank += log2(value) - 3; //64 bit So another 3 lines
780 /* How many ranks? */
781 /* number of physical banks */
782 value = spd_read_byte(device, SPD_MOD_ATTRIB_RANK);
783 if (value < 0) goto hw_err;
784 /* value >>= SPD_MOD_ATTRIB_RANK_NUM_SHIFT; */
785 value &= SPD_MOD_ATTRIB_RANK_NUM_MASK;
786 value += SPD_MOD_ATTRIB_RANK_NUM_BASE; // 0-->1, 1-->2, 3-->4
788 rank == 1 only one rank or say one side
789 rank == 2 two side , and two ranks
790 rank == 4 two side , and four ranks total
791 Some one side two ranks, because of stacked
793 if ((value != 1) && (value != 2) && (value != 4 )) {
798 /* verify if per_rank is equal byte 31
799 it has the DIMM size as a multiple of 128MB.
801 value = spd_read_byte(device, SPD_RANK_SIZE);
802 if (value < 0) goto hw_err;
805 if (value <=4 ) value += 8; // add back to 1G to high
806 value += (27-5); // make 128MB to the real lines
807 if ( value != (sz->per_rank)) {
808 printk(BIOS_ERR, "Bad RANK Size --\n");
815 die("Bad SPD value\n");
816 /* If an hw_error occurs report that I have no memory */
828 static void set_dimm_size(const struct mem_controller *ctrl,
829 struct dimm_size *sz, unsigned index,
830 struct mem_info *meminfo)
832 uint32_t base0, base1;
834 /* For each base register.
835 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
836 * The initialize dimm size is in bits.
837 * Set the base enable bit0.
842 /* Make certain side1 of the dimm is at least 128MB */
843 if (sz->per_rank >= 27) {
844 base0 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
847 /* Make certain side2 of the dimm is at least 128MB */
848 if (sz->rank > 1) { // 2 ranks or 4 ranks
849 base1 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
852 /* Double the size if we are using dual channel memory */
853 if (meminfo->is_Width128) {
854 base0 = (base0 << 1) | (base0 & 1);
855 base1 = (base1 << 1) | (base1 & 1);
858 /* Clear the reserved bits */
859 base0 &= ~0xe007fffe;
860 base1 &= ~0xe007fffe;
862 if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
863 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), base0);
864 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), base1);
866 /* Set the appropriate DIMM base address register */
867 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 0) << 2), base0);
868 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 1) << 2), base1);
869 #if CONFIG_QRANK_DIMM_SUPPORT
871 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), base0);
872 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), base1);
877 /* Enable the memory clocks for this DIMM by Clear the MemClkDis bit*/
881 #if CONFIG_CPU_SOCKET_TYPE == 0x10 /* L1 */
882 ClkDis0 = DTL_MemClkDis0;
883 #elif CONFIG_CPU_SOCKET_TYPE == 0x11 /* AM2 */
884 ClkDis0 = DTL_MemClkDis0_AM2;
885 #elif CONFIG_CPU_SOCKET_TYPE == 0x12 /* S1G1 */
886 ClkDis0 = DTL_MemClkDis0_S1g1;
889 if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
890 dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
891 dword &= ~(ClkDis0 >> index);
892 pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dword);
895 dword = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW); //Channel A
896 dword &= ~(ClkDis0 >> index);
897 #if CONFIG_QRANK_DIMM_SUPPORT
899 dword &= ~(ClkDis0 >> (index+2));
902 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dword);
904 if (meminfo->is_Width128) { // ChannelA+B
905 dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
906 dword &= ~(ClkDis0 >> index);
907 #if CONFIG_QRANK_DIMM_SUPPORT
909 dword &= ~(ClkDis0 >> (index+2));
912 pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dword);
919 /* row col bank for 64 bit
935 static void set_dimm_cs_map(const struct mem_controller *ctrl,
936 struct dimm_size *sz, unsigned index,
937 struct mem_info *meminfo)
939 static const uint8_t cs_map_aaa[24] = {
940 /* (bank=2, row=13, col=9)(3, 16, 11) ---> (0, 0, 0) (1, 3, 2) */
955 if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
958 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
959 map &= ~(0xf << (index * 4));
960 #if CONFIG_QRANK_DIMM_SUPPORT
962 map &= ~(0xf << ( (index + 2) * 4));
966 /* Make certain side1 of the dimm is at least 128MB */
967 if (sz->per_rank >= 27) {
969 temp_map = cs_map_aaa[(sz->bank-2)*3*4 + (sz->rows - 13)*3 + (sz->col - 9) ];
970 map |= temp_map << (index*4);
971 #if CONFIG_QRANK_DIMM_SUPPORT
973 map |= temp_map << ( (index + 2) * 4);
978 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
983 static long spd_set_ram_size(const struct mem_controller *ctrl,
984 struct mem_info *meminfo)
988 for (i = 0; i < DIMM_SOCKETS; i++) {
989 struct dimm_size *sz = &(meminfo->sz[i]);
990 u32 spd_device = ctrl->channel0[i];
992 if (!(meminfo->dimm_mask & (1 << i))) {
993 if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
994 spd_device = ctrl->channel1[i];
1000 spd_get_dimm_size(spd_device, sz);
1001 if (sz->per_rank == 0) {
1002 return -1; /* Report SPD error */
1004 set_dimm_size(ctrl, sz, i, meminfo);
1005 set_dimm_cs_map(ctrl, sz, i, meminfo);
1007 return meminfo->dimm_mask;
1010 static void route_dram_accesses(const struct mem_controller *ctrl,
1011 unsigned long base_k, unsigned long limit_k)
1013 /* Route the addresses to the controller node */
1018 unsigned limit_reg, base_reg;
1021 node_id = ctrl->node_id;
1022 index = (node_id << 3);
1023 limit = (limit_k << 2);
1024 limit &= 0xffff0000;
1025 limit -= 0x00010000;
1026 limit |= ( 0 << 8) | (node_id << 0);
1027 base = (base_k << 2);
1029 base |= (0 << 8) | (1<<1) | (1<<0);
1031 limit_reg = 0x44 + index;
1032 base_reg = 0x40 + index;
1033 for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1);
1034 device += PCI_DEV(0, 1, 0)) {
1035 pci_write_config32(device, limit_reg, limit);
1036 pci_write_config32(device, base_reg, base);
1040 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
1042 /* Error if I don't have memory */
1047 /* Report the amount of memory. */
1048 printk(BIOS_DEBUG, "RAM end at 0x%08x kB\n", tom_k);
1050 /* Now set top of memory */
1052 if (tom_k > (4*1024*1024)) {
1053 printk_raminit("Handling memory mapped above 4 GB\n");
1054 printk_raminit("Upper RAM end at 0x%08x kB\n", tom_k);
1055 msr.lo = (tom_k & 0x003fffff) << 10;
1056 msr.hi = (tom_k & 0xffc00000) >> 22;
1057 wrmsr(TOP_MEM2, msr);
1058 printk_raminit("Correcting memory amount mapped below 4 GB\n");
1061 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
1062 * so I can see my rom chip and other I/O devices.
1064 if (tom_k >= 0x003f0000) {
1065 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
1066 if (hole_startk != 0) {
1067 tom_k = hole_startk;
1071 printk_raminit("Adjusting lower RAM end\n");
1073 printk_raminit("Lower RAM end at 0x%08x kB\n", tom_k);
1074 msr.lo = (tom_k & 0x003fffff) << 10;
1075 msr.hi = (tom_k & 0xffc00000) >> 22;
1076 wrmsr(TOP_MEM, msr);
1079 static unsigned long interleave_chip_selects(const struct mem_controller *ctrl, int is_Width128)
1083 static const uint8_t csbase_low_f0_shift[] = {
1084 /* 128MB */ (14 - (13-5)),
1085 /* 256MB */ (15 - (13-5)),
1086 /* 512MB */ (15 - (13-5)),
1087 /* 512MB */ (16 - (13-5)),
1088 /* 512MB */ (16 - (13-5)),
1089 /* 1GB */ (16 - (13-5)),
1090 /* 1GB */ (16 - (13-5)),
1091 /* 2GB */ (16 - (13-5)),
1092 /* 2GB */ (17 - (13-5)),
1093 /* 4GB */ (17 - (13-5)),
1094 /* 4GB */ (16 - (13-5)),
1095 /* 8GB */ (17 - (13-5)),
1098 /* cs_base_high is not changed */
1100 uint32_t csbase_inc;
1101 int chip_selects, index;
1103 unsigned common_size;
1104 unsigned common_cs_mode;
1105 uint32_t csbase, csmask;
1107 /* See if all of the memory chip selects are the same size
1108 * and if so count them.
1110 #if defined(CMOS_VSTART_interleave_chip_selects)
1111 if (read_option(interleave_chip_selects, 1) == 0)
1114 #if !defined(CONFIG_INTERLEAVE_CHIP_SELECTS) || !CONFIG_INTERLEAVE_CHIP_SELECTS
1121 common_cs_mode = 0xff;
1122 for (index = 0; index < 8; index++) {
1127 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1129 /* Is it enabled? */
1134 size = (value >> 19) & 0x3ff;
1135 if (common_size == 0) {
1138 /* The size differed fail */
1139 if (common_size != size) {
1143 value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
1144 cs_mode =( value >> ((index>>1)*4)) & 0xf;
1145 if (common_cs_mode == 0xff) {
1146 common_cs_mode = cs_mode;
1148 /* The cs_mode differed fail */
1149 if (common_cs_mode != cs_mode) {
1154 /* Chip selects can only be interleaved when there is
1155 * more than one and their is a power of two of them.
1157 bits = log2(chip_selects);
1158 if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
1159 //chip_selects max = 8
1163 /* Find the bits of csbase that we need to interleave on */
1164 csbase_inc = 1 << (csbase_low_f0_shift[common_cs_mode]);
1169 /* Compute the initial values for csbase and csbask.
1170 * In csbase just set the enable bit and the base to zero.
1171 * In csmask set the mask bits for the size and page level interleave.
1174 csmask = (((common_size << bits) - 1) << 19);
1175 csmask |= 0x3fe0 & ~((csbase_inc << bits) - csbase_inc);
1176 for (index = 0; index < 8; index++) {
1179 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1180 /* Is it enabled? */
1184 pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
1185 if ((index & 1) == 0) { //only have 4 CSMASK
1186 pci_write_config32(ctrl->f2, DRAM_CSMASK + ((index>>1) << 2), csmask);
1188 csbase += csbase_inc;
1191 printk(BIOS_DEBUG, "Interleaved\n");
1193 /* Return the memory size in K */
1194 return common_size << ((27-10) + bits);
1197 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
1201 /* Remember which registers we have used in the high 8 bits of tom */
1204 /* Find the largest remaining canidate */
1205 unsigned index, canidate;
1206 uint32_t csbase, csmask;
1210 for (index = 0; index < 8; index++) {
1212 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1214 /* Is it enabled? */
1219 /* Is it greater? */
1220 if (value <= csbase) {
1224 /* Has it already been selected */
1225 if (tom & (1 << (index + 24))) {
1228 /* I have a new canidate */
1233 /* See if I have found a new canidate */
1238 /* Remember the dimm size */
1239 size = csbase >> 19;
1241 /* Remember I have used this register */
1242 tom |= (1 << (canidate + 24));
1244 /* Recompute the cs base register value */
1245 csbase = (tom << 19) | 1;
1247 /* Increment the top of memory */
1250 /* Compute the memory mask */
1251 csmask = ((size -1) << 19);
1252 csmask |= 0x3fe0; /* For now don't optimize */
1254 /* Write the new base register */
1255 pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1256 /* Write the new mask register */
1257 if ((canidate & 1) == 0) { //only have 4 CSMASK
1258 pci_write_config32(ctrl->f2, DRAM_CSMASK + ((canidate >> 1) << 2), csmask);
1262 /* Return the memory size in K */
1263 return (tom & ~0xff000000) << (27-10);
1266 static unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
1270 /* Find the last memory address used */
1272 for (node_id = 0; node_id < max_node_id; node_id++) {
1273 uint32_t limit, base;
1275 index = node_id << 3;
1276 base = pci_read_config32(ctrl->f1, 0x40 + index);
1277 /* Only look at the limit if the base is enabled */
1278 if ((base & 3) == 3) {
1279 limit = pci_read_config32(ctrl->f1, 0x44 + index);
1280 end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
1286 static void order_dimms(const struct mem_controller *ctrl,
1287 struct mem_info *meminfo)
/* Lay out this node's chip selects (interleaved when possible, otherwise
 * linearly ordered), then hook the resulting range into the system map. */
1289 unsigned long tom_k, base_k;
1291 tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128);
/* Fall back to a simple largest-first ordering when interleaving failed */
1294 printk(BIOS_DEBUG, "Interleaving disabled\n");
1295 tom_k = order_chip_selects(ctrl);
1298 /* Compute the memory base address */
/* Start this node's range where the previous nodes' memory ends */
1299 base_k = memory_end_k(ctrl, ctrl->node_id);
1301 route_dram_accesses(ctrl, base_k, tom_k);
1302 set_top_mem(tom_k, 0);
1305 static long disable_dimm(const struct mem_controller *ctrl, unsigned index,
1306 struct mem_info *meminfo)
/* Disable DIMM socket `index` by zeroing its CS base registers (both ranks,
 * plus ranks 2/3 for quad-rank parts) and clearing it from dimm_mask.
 * Returns the updated dimm_mask. */
1308 printk(BIOS_DEBUG, "disabling dimm %02x\n", index);
/* When only channel B is populated, the DIMM's chip selects live in the
 * upper CS register pair (offsets +4/+5) instead of +0/+1. */
1309 if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
1310 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), 0);
1311 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), 0);
1313 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 0) << 2), 0);
1314 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 1) << 2), 0);
1315 #if CONFIG_QRANK_DIMM_SUPPORT
1316 if (meminfo->sz[index].rank == 4) {
1317 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), 0);
1318 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), 0);
1323 meminfo->dimm_mask &= ~(1 << index);
1324 return meminfo->dimm_mask;
1327 static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
1328 struct mem_info *meminfo)
/* Classify the populated DIMMs as registered or unbuffered via SPD byte
 * SPD_DIMM_TYPE, reject unsupported mixes, and program DCL_UnBuffDimm
 * accordingly. Returns the (possibly reduced) dimm_mask. */
1331 uint32_t registered;
1334 for (i = 0; (i < DIMM_SOCKETS); i++) {
1336 u32 spd_device = ctrl->channel0[i];
1337 if (!(meminfo->dimm_mask & (1 << i))) {
1338 if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
1339 spd_device = ctrl->channel1[i];
1344 value = spd_read_byte(spd_device, SPD_DIMM_TYPE);
1349 /* Registered dimm ? */
1351 if ((value == SPD_DIMM_TYPE_RDIMM) || (value == SPD_DIMM_TYPE_mRDIMM)) {
1352 //check SPD_MOD_ATTRIB to verify it is SPD_MOD_ATTRIB_REGADC (0x11)?
1353 registered |= (1<<i);
1357 if (is_opteron(ctrl)) {
/* On Opteron-class parts, a registered/unbuffered mix is resolved by
 * dropping the unbuffered DIMMs rather than dying. */
1359 if ( registered != (meminfo->dimm_mask & ((1<<DIMM_SOCKETS)-1)) ) {
1360 meminfo->dimm_mask &= (registered | (registered << DIMM_SOCKETS) ); //disable unbuffed dimm
1361 // die("Mixed buffered and registered dimms not supported");
1363 //By yhlu for debug M2, s1g1 can do dual channel, but it use unbuffer DIMM
1365 die("Unbuffered Dimms not supported on Opteron");
1371 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1372 dcl &= ~DCL_UnBuffDimm;
1373 meminfo->is_registered = 1;
1375 dcl |= DCL_UnBuffDimm;
1376 meminfo->is_registered = 0;
1378 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1380 if (meminfo->is_registered) {
1381 printk(BIOS_SPEW, "Registered\n");
1383 printk(BIOS_SPEW, "Unbuffered\n");
1385 return meminfo->dimm_mask;
1388 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
/* Probe every DIMM socket on both channels over SPD and build a presence
 * bitmask: bit i = channel 0 socket i, bit (DIMM_SOCKETS + i) = channel 1
 * socket i. Only DDR2 SDRAM modules are accepted. */
1393 for (i = 0; i < DIMM_SOCKETS; i++) {
1396 device = ctrl->channel0[i];
1397 printk_raminit("DIMM socket %i, channel 0 SPD device is 0x%02x\n", i, device);
1399 byte = spd_read_byte(ctrl->channel0[i], SPD_MEM_TYPE); /* Type */
1400 if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
1401 dimm_mask |= (1 << i);
1404 device = ctrl->channel1[i];
1405 printk_raminit("DIMM socket %i, channel 1 SPD device is 0x%02x\n", i, device);
1407 byte = spd_read_byte(ctrl->channel1[i], SPD_MEM_TYPE);
1408 if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
1409 dimm_mask |= (1 << (i + DIMM_SOCKETS));
1416 static long spd_enable_2channels(const struct mem_controller *ctrl, struct mem_info *meminfo)
/* Enable 128-bit (dual channel) mode when every populated socket holds an
 * identical DIMM pair on channels A and B and the northbridge supports it.
 * Otherwise fall back to single channel, optionally enabling 64MuxMode on
 * mux-capable sockets with channel-B-only population. Returns the (possibly
 * reduced) dimm_mask. */
1420 /* SPD addresses to verify are identical */
1421 static const uint8_t addresses[] = {
1422 2, /* Type should be DDR2 SDRAM */
1423 3, /* *Row addresses */
1424 4, /* *Column addresses */
1425 5, /* *Number of DIMM Ranks */
1426 6, /* *Module Data Width*/
1427 11, /* *DIMM Conf Type */
1428 13, /* *Pri SDRAM Width */
1429 17, /* *Logical Banks */
1430 20, /* *DIMM Type Info */
1431 21, /* *SDRAM Module Attributes */
1432 27, /* *tRP Row precharge time */
1433 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1434 29, /* *tRCD RAS to CAS */
1435 30, /* *tRAS Activate to Precharge */
1436 36, /* *Write recovery time (tWR) */
1437 37, /* *Internal write to read command delay (tRDP) */
1438 38, /* *Internal read to precharge command delay (tRTP) */
1439 40, /* *Extension of Byte 41 tRC and Byte 42 tRFC */
1440 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1441 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1442 /* The SPD addresses 18, 9, 23, 26 need special treatment like
1443 * in spd_set_memclk. Right now they cause many false negatives.
1444 * Keep them at the end to see other mismatches (if any).
1446 18, /* *Supported CAS Latencies */
1447 9, /* *Cycle time at highest CAS Latency CL=X */
1448 23, /* *Cycle time at CAS Latency (CLX - 1) */
1449 26, /* *Cycle time at CAS Latency (CLX - 2) */
1454 /* S1G1 and AM2 sockets are Mod64BitMux capable. */
1455 #if CONFIG_CPU_SOCKET_TYPE == 0x11 || CONFIG_CPU_SOCKET_TYPE == 0x12
1461 /* If the dimms are not in pairs do not do dual channels */
1462 if ((meminfo->dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1463 ((meminfo->dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1464 goto single_channel;
1466 /* If the cpu is not capable of doing dual channels don't do dual channels */
1467 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1468 if (!(nbcap & NBCAP_128Bit)) {
1469 goto single_channel;
1471 for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1472 unsigned device0, device1;
1475 /* If I don't have a dimm skip this one */
1476 if (!(meminfo->dimm_mask & (1 << i))) {
1479 device0 = ctrl->channel0[i];
1480 device1 = ctrl->channel1[i];
1481 /* Abort if the chips don't support a common CAS latency. */
/* Byte 18 is a bitmask of supported CAS latencies; AND yields the
 * latencies supported by both modules of the pair. */
1482 common_cl = spd_read_byte(device0, 18) & spd_read_byte(device1, 18);
1484 printk(BIOS_DEBUG, "No common CAS latency supported\n");
1485 goto single_channel;
1487 printk_raminit("Common CAS latency bitfield: 0x%02x\n", common_cl);
1489 for (j = 0; j < ARRAY_SIZE(addresses); j++) {
1491 addr = addresses[j];
1492 value0 = spd_read_byte(device0, addr);
1496 value1 = spd_read_byte(device1, addr);
1500 if (value0 != value1) {
1501 printk_raminit("SPD values differ between channel 0/1 for byte %i\n", addr);
1502 goto single_channel;
1506 printk(BIOS_SPEW, "Enabling dual channel memory\n");
1507 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1508 dcl &= ~DCL_BurstLength32; /* 32byte mode may be preferred in platforms that include graphics controllers that generate a lot of 32-bytes system memory accesses
1509 32byte mode is not supported when the DRAM interface is 128 bits wides, even 32byte mode is set, system still use 64 byte mode */
1510 dcl |= DCL_Width128;
1511 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1512 meminfo->is_Width128 = 1;
1513 return meminfo->dimm_mask;
1516 meminfo->is_Width128 = 0;
1517 meminfo->is_64MuxMode = 0;
/* Single-channel fallback: unmatched population across channels */
1520 if ((meminfo->dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
1521 ((meminfo->dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1522 if (((meminfo->dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
1523 /* mux capable and single dimm in channelB */
1525 printk(BIOS_SPEW, "Enable 64MuxMode & BurstLength32\n");
1526 dcm = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
1527 dcm |= DCM_Mode64BitMux;
1528 pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
1529 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1530 //dcl |= DCL_BurstLength32; /* 32byte mode for channelB only */
1531 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1532 meminfo->is_64MuxMode = 1;
/* Not mux capable: drop all channel-B DIMMs from the mask */
1534 meminfo->dimm_mask &= ~((1 << (DIMM_SOCKETS * 2)) - (1 << DIMM_SOCKETS));
1537 } else { /* unmatched dual dimms ? */
1538 /* unmatched dual dimms not supported by meminit code. Use single channelA dimm. */
1539 meminfo->dimm_mask &= ~((1 << (DIMM_SOCKETS * 2)) - (1 << DIMM_SOCKETS));
1540 printk(BIOS_SPEW, "Unmatched dual dimms. Use single channelA dimm.\n");
1542 return meminfo->dimm_mask;
/* Fragment of struct mem_param (the struct header line is not visible in
 * this listing): per-speed-grade DRAM timing parameters. */
1546 uint16_t cycle_time; /* cycle time, e.g. 0x250 reads as 2.50 ns — TODO confirm encoding */
1547 uint8_t divisor; /* In 1/40 ns increments */
1552 uint8_t DcqByPassMax; /* DCQ bypass maximum count for DRAM Config High */
1553 uint32_t dch_memclk; /* DCH_MemClkFreq_* encoding for the memory clock */
1557 static const struct mem_param speed[] = {
/* Speed-grade table, slowest (DDR2-400, 5.0 ns) first; terminated by an
 * entry with cycle_time == 0 (see get_mem_param's loop condition).
 * NOTE(review): most initializer fields are missing from this listing. */
1560 .cycle_time = 0x500,
1561 .divisor = 200, // how many 1/40ns per clock
1562 .dch_memclk = DCH_MemClkFreq_200MHz, //0
1572 .cycle_time = 0x375,
1573 .divisor = 150, //????
1574 .dch_memclk = DCH_MemClkFreq_266MHz, //1
1583 .cycle_time = 0x300,
1585 .dch_memclk = DCH_MemClkFreq_333MHz, //2
1595 .cycle_time = 0x250,
1597 .dch_memclk = DCH_MemClkFreq_400MHz,//3
/* Terminator entry */
1605 .cycle_time = 0x000,
1609 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
/* Select the fastest entry of speed[] whose cycle time is still >= the
 * requested minimum; dies if min_cycle_time is faster than any supported
 * grade. (Fix: error message read "to low" — corrected to "too low".) */
1612 const struct mem_param *param;
/* Walk until the next (faster) entry would violate min_cycle_time;
 * the table is terminated by cycle_time == 0. */
1613 for (param = &speed[0]; param->cycle_time ; param++) {
1614 if (min_cycle_time > (param+1)->cycle_time) {
1618 if (!param->cycle_time) {
1619 die("min_cycle_time too low");
1621 printk(BIOS_SPEW, "%s\n", param->name);
1625 static uint8_t get_exact_divisor(int i, uint8_t divisor)
/* Refine the nominal memory-clock divisor using the CPU FID so the derived
 * memory clock matches what the hardware will actually produce.
 * `i` indexes the speed grade (0..3 for 200/266/333/400 MHz); returns the
 * corrected divisor, or the input unchanged when out of table range. */
1627 //input divisor could be 200(200), 150(266), 120(333), 100 (400)
/* dv_a is indexed as dv_a[fid_index * 4 + speed_index]; rows correspond
 * to FID-derived core multipliers (4x .. 15x per the row comments). */
1628 static const uint8_t dv_a[] = {
1629 /* 200 266 333 400 */
1630 /*4 */ 250, 250, 250, 250,
1631 /*5 */ 200, 200, 200, 100,
1632 /*6 */ 200, 166, 166, 100,
1633 /*7 */ 200, 171, 142, 100,
1635 /*8 */ 200, 150, 125, 100,
1636 /*9 */ 200, 156, 133, 100,
1637 /*10*/ 200, 160, 120, 100,
1638 /*11*/ 200, 163, 127, 100,
1640 /*12*/ 200, 150, 133, 100,
1641 /*13*/ 200, 153, 123, 100,
1642 /*14*/ 200, 157, 128, 100,
1643 /*15*/ 200, 160, 120, 100,
1650 /* Check for FID control support */
1651 struct cpuid_result cpuid1;
1652 cpuid1 = cpuid(0x80000007);
/* EDX bit 1 of CPUID 8000_0007 = FID control supported */
1653 if( cpuid1.edx & 0x02 ) {
1654 /* Use current FID */
1656 msr = rdmsr(0xc0010042);
1657 fid_cur = msr.lo & 0x3f;
1661 /* Use startup FID */
1663 msr = rdmsr(0xc0010015);
1664 fid_start = (msr.lo & (0x3f << 24));
1666 index = fid_start>>25;
/* Out-of-range FID or speed index: fall back to the nominal divisor */
1669 if (index>12) return divisor;
1671 if (i>3) return divisor;
1673 return dv_a[index * 4+i];
/* Result bundle for spd_set_memclk(): the chosen speed grade plus the
 * (possibly reduced) dimm_mask. */
1678 struct spd_set_memclk_result {
1679 const struct mem_param *param;
1684 static unsigned convert_to_linear(unsigned value)
/* Expand an SPD cycle-time byte (upper nibble = ns, lower nibble = tenths,
 * with codes 10..13 meaning .25/.33/.66/.75) into a wider fixed-point value
 * that compares linearly. */
1686 static const unsigned fraction[] = { 0x25, 0x33, 0x66, 0x75 };
1689 /* We need to convert value to more readable */
1690 if ((value & 0xf) < 10) { //no .25, .33, .66, .75
/* Fractional code >= 10: substitute the two-digit fraction table entry */
1693 valuex = ((value & 0xf0) << 4) | fraction [(value & 0xf)-10];
/* SPD bytes holding cycle times for increasing CAS latency: byte 25
 * (CL X-2), byte 23 (CL X-1), byte 9 (highest CL X) — TODO confirm order
 * against the JEDEC DDR2 SPD annex. */
1699 static const uint8_t latency_indicies[] = { 25, 23, 9 };
1701 static int find_optimum_spd_latency(u32 spd_device, unsigned *min_latency, unsigned *min_cycle_time)
/* Read one DIMM's SPD and raise *min_cycle_time / *min_latency to values
 * this module can actually support (fastest clock first, lowest latency at
 * that clock). Return value distinguishes success from SPD read errors —
 * exact codes not visible in this listing. */
1703 int new_cycle_time, new_latency;
1708 /* First find the supported CAS latencies
1709 * Byte 18 for DDR SDRAM is interpreted:
1710 * bit 3 == CAS Latency = 3
1711 * bit 4 == CAS Latency = 4
1712 * bit 5 == CAS Latency = 5
1713 * bit 6 == CAS Latency = 6
/* Start from the slowest supported cycle time (5.0 ns / DDR2-400 floor) */
1715 new_cycle_time = 0x500;
1718 latencies = spd_read_byte(spd_device, SPD_CAS_LAT);
1722 printk_raminit("\tlatencies: %08x\n", latencies);
1723 /* Compute the lowest cas latency which can be expressed in this
1724 * particular SPD EEPROM. You can store at most settings for 3
1725 * contiguous CAS latencies, so by taking the highest CAS
1726 * latency marked as supported in the SPD and subtracting 2 you
1727 * get the lowest expressible CAS latency. That latency is not
1728 * necessarily supported, but a (maybe invalid) entry exists
1731 latency = log2(latencies) - 2;
1733 /* Loop through and find a fast clock with a low latency */
1734 for (index = 0; index < 3; index++, latency++) {
/* Skip latencies outside the DDR2 3..6 range or not marked supported */
1736 if ((latency < 3) || (latency > 6) ||
1737 (!(latencies & (1 << latency)))) {
1740 value = spd_read_byte(spd_device, latency_indicies[index]);
1745 printk_raminit("\tindex: %08x\n", index);
1746 printk_raminit("\t\tlatency: %08x\n", latency);
1747 printk_raminit("\t\tvalue1: %08x\n", value);
1749 value = convert_to_linear(value);
1751 printk_raminit("\t\tvalue2: %08x\n", value);
1753 /* Only increase the latency if we decrease the clock */
1754 if (value >= *min_cycle_time ) {
1755 if (value < new_cycle_time) {
1756 new_cycle_time = value;
1757 new_latency = latency;
1758 } else if (value == new_cycle_time) {
/* Same clock: prefer the lower latency */
1759 if (new_latency > latency) {
1760 new_latency = latency;
1764 printk_raminit("\t\tnew_cycle_time: %08x\n", new_cycle_time);
1765 printk_raminit("\t\tnew_latency: %08x\n", new_latency);
/* NOTE(review): the initialization of new_latency is not visible in this
 * listing; presumably it is set before the loop — verify upstream. */
1769 if (new_latency > 6){
1773 /* Does min_latency need to be increased? */
1774 if (new_cycle_time > *min_cycle_time) {
1775 *min_cycle_time = new_cycle_time;
1778 /* Does min_cycle_time need to be increased? */
1779 if (new_latency > *min_latency) {
1780 *min_latency = new_latency;
1783 printk_raminit("2 min_cycle_time: %08x\n", *min_cycle_time);
1784 printk_raminit("2 min_latency: %08x\n", *min_latency);
1789 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, struct mem_info *meminfo)
/* Pick the fastest memory clock / lowest CAS latency supported by the
 * northbridge cap register, the BIOS/CMOS limit, and every populated DIMM;
 * disable DIMMs that cannot keep up, then program DRAM Config High
 * (memclk) and DRAM Timing Low (Tcl). Returns the chosen speed grade and
 * the surviving dimm_mask (dimm_mask = -1 on hard error). */
1791 /* Compute the minimum cycle time for these dimms */
1792 struct spd_set_memclk_result result;
1793 unsigned min_cycle_time, min_latency, bios_cycle_time;
1797 static const uint16_t min_cycle_times[] = { // use full speed to compare
1798 [NBCAP_MEMCLK_NOLIMIT] = 0x250, /*2.5ns */
1799 [NBCAP_MEMCLK_333MHZ] = 0x300, /* 3.0ns */
1800 [NBCAP_MEMCLK_266MHZ] = 0x375, /* 3.75ns */
1801 [NBCAP_MEMCLK_200MHZ] = 0x500, /* 5.0 ns */
1805 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1806 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
/* BIOS limit comes from CMOS when the option table provides it,
 * otherwise from the build-time CONFIG_MAX_MEM_CLOCK (default DDR400). */
1807 bios_cycle_time = min_cycle_times[
1808 #ifdef CMOS_VSTART_max_mem_clock
1809 read_option(max_mem_clock, 0)
1811 #if defined(CONFIG_MAX_MEM_CLOCK)
1812 CONFIG_MAX_MEM_CLOCK
1814 0 // use DDR400 as default
1819 if (bios_cycle_time > min_cycle_time) {
1820 min_cycle_time = bios_cycle_time;
1824 printk_raminit("1 min_cycle_time: %08x\n", min_cycle_time);
1826 /* Compute the least latency with the fastest clock supported
1827 * by both the memory controller and the dimms.
1829 for (i = 0; i < DIMM_SOCKETS; i++) {
1832 printk_raminit("1.1 dimm_mask: %08x\n", meminfo->dimm_mask);
1833 printk_raminit("i: %08x\n",i);
1835 if (meminfo->dimm_mask & (1 << i)) {
1836 spd_device = ctrl->channel0[i];
1837 printk_raminit("Channel 0 settings:\n");
1839 switch (find_optimum_spd_latency(spd_device, &min_latency, &min_cycle_time)) {
1847 if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) {
1848 spd_device = ctrl->channel1[i];
1849 printk_raminit("Channel 1 settings:\n");
1851 switch (find_optimum_spd_latency(spd_device, &min_latency, &min_cycle_time)) {
1861 /* Make a second pass through the dimms and disable
1862 * any that cannot support the selected memclk and cas latency.
1865 printk_raminit("3 min_cycle_time: %08x\n", min_cycle_time);
1866 printk_raminit("3 min_latency: %08x\n", min_latency);
1868 for (i = 0; (i < DIMM_SOCKETS); i++) {
1873 u32 spd_device = ctrl->channel0[i];
1875 if (!(meminfo->dimm_mask & (1 << i))) {
1876 if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
1877 spd_device = ctrl->channel1[i];
1883 latencies = spd_read_byte(spd_device, SPD_CAS_LAT);
1884 if (latencies < 0) goto hw_error;
1885 if (latencies == 0) {
1889 /* Compute the lowest cas latency supported */
1890 latency = log2(latencies) -2;
1892 /* Walk through searching for the selected latency */
1893 for (index = 0; index < 3; index++, latency++) {
1894 if (!(latencies & (1 << latency))) {
1897 if (latency == min_latency)
1900 /* If I can't find the latency or my index is bad error */
1901 if ((latency != min_latency) || (index >= 3)) {
1905 /* Read the min_cycle_time for this latency */
1906 val = spd_read_byte(spd_device, latency_indicies[index]);
1907 if (val < 0) goto hw_error;
1909 val = convert_to_linear(val);
1910 /* All is good if the selected clock speed
1911 * is what I need or slower.
1913 if (val <= min_cycle_time) {
1916 /* Otherwise I have an error, disable the dimm */
1918 meminfo->dimm_mask = disable_dimm(ctrl, i, meminfo);
1921 printk_raminit("4 min_cycle_time: %08x\n", min_cycle_time);
1923 /* Now that I know the minimum cycle time lookup the memory parameters */
1924 result.param = get_mem_param(min_cycle_time);
1926 /* Update DRAM Config High with our selected memory speed */
1927 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1928 value &= ~(DCH_MemClkFreq_MASK << DCH_MemClkFreq_SHIFT);
1930 value |= result.param->dch_memclk << DCH_MemClkFreq_SHIFT;
1931 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1933 printk(BIOS_DEBUG, "%s\n", result.param->name);
1935 /* Update DRAM Timing Low with our selected cas latency */
1936 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1937 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1938 value |= (min_latency - DTL_TCL_BASE) << DTL_TCL_SHIFT;
1939 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1941 result.dimm_mask = meminfo->dimm_mask;
/* hw_error path: signal failure to the caller */
1944 result.param = (const struct mem_param *)0;
1945 result.dimm_mask = -1;
1949 static unsigned convert_to_1_4(unsigned value)
/* Map an SPD fractional-nanosecond code (low 3 bits) to quarter-ns units:
 * codes 0..6 -> 0, .25, .50, .50, .75, .75, 0 quarters. */
1951 static const uint8_t fraction[] = { 0, 1, 2, 2, 3, 3, 0 };
1954 /* We need to convert value to more readable */
1955 valuex = fraction [value & 0x7];
1959 static int get_dimm_Trc_clocks(u32 spd_device, const struct mem_param *param)
/* Read tRC from SPD (byte 41 whole-ns plus the byte-40 fractional
 * extension) and convert it to memory-clock cycles at the selected speed,
 * clamped to the controller minimum DTL_TRC_MIN. */
1964 value = spd_read_byte(spd_device, SPD_TRC);
1967 printk_raminit("update_dimm_Trc: tRC (41) = %08x\n", value);
/* Byte 40 upper nibble carries the fractional-ns extension of tRC */
1969 value2 = spd_read_byte(spd_device, SPD_TRC -1);
1971 value += convert_to_1_4(value2>>4);
1974 printk_raminit("update_dimm_Trc: tRC final value = %i\n", value);
/* Round up to whole clocks: divisor is in the same 1/4-ns-scaled units */
1976 clocks = (value + param->divisor - 1)/param->divisor;
1977 printk_raminit("update_dimm_Trc: clocks = %i\n", clocks);
1979 if (clocks < DTL_TRC_MIN) {
1980 // We might want to die here instead or (at least|better) disable this bank.
1981 printk(BIOS_NOTICE, "update_dimm_Trc: Can't refresh fast enough, "
1982 "want %i clocks, minimum is %i clocks.\n", clocks, DTL_TRC_MIN);
1983 clocks = DTL_TRC_MIN;
1988 static int update_dimm_Trc(const struct mem_controller *ctrl,
1989 const struct mem_param *param,
1990 int i, long dimm_mask)
/* Widen the Trc field in DRAM Timing Low if DIMM i needs more clocks than
 * currently programmed; never shrinks an already-larger value. */
1992 int clocks, old_clocks;
1994 u32 spd_device = ctrl->channel0[i];
/* Channel-B-only DIMM: read SPD from the channel 1 device instead */
1996 if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
1997 spd_device = ctrl->channel1[i];
2000 clocks = get_dimm_Trc_clocks(spd_device, param);
2003 if (clocks > DTL_TRC_MAX) {
2006 printk_raminit("update_dimm_Trc: clocks after adjustment = %i\n", clocks);
2008 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
2009 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
2010 if (old_clocks >= clocks) { //?? someone did it
2011 // clocks = old_clocks;
2014 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
2015 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
2016 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
2020 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i, struct mem_info *meminfo)
/* Program the per-chip-select Trfc encoding in DRAM Timing High from the
 * logical DIMM size (derived from per-rank size and device width); never
 * shrinks an already-larger value. */
2022 unsigned clocks, old_clocks;
2026 u32 spd_device = ctrl->channel0[i];
2028 if (!(meminfo->dimm_mask & (1 << i)) && (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
2029 spd_device = ctrl->channel1[i];
2030 ch_b = 2; /* offset to channelB trfc setting */
2033 //get the cs_size --> logic dimm size
2034 value = spd_read_byte(spd_device, SPD_PRI_WIDTH);
2039 value = 6 - log2(value); //4-->4, 8-->3, 16-->2
/* Encode logical size as the Trfc selector expected by the register */
2041 clocks = meminfo->sz[i].per_rank - 27 + 2 - value;
2043 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
/* Each chip select gets a 3-bit Trfc field starting at DTH_TRFC0_SHIFT */
2045 old_clocks = ((dth >> (DTH_TRFC0_SHIFT + ((i + ch_b) * 3))) & DTH_TRFC_MASK);
2047 if (old_clocks >= clocks) { // some one did it?
2050 dth &= ~(DTH_TRFC_MASK << (DTH_TRFC0_SHIFT + ((i + ch_b) * 3)));
2051 dth |= clocks << (DTH_TRFC0_SHIFT + ((i + ch_b) * 3));
2052 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
2056 static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask,
2058 unsigned SPD_TT, unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX )
/* Generic helper for quarter-ns SPD timing fields (tRCD, tRRD, tRP, tWR,
 * tRTP, tWTR): read SPD byte SPD_TT, convert to clocks at the selected
 * speed, clamp to [TT_MIN, TT_MAX], and raise (never lower) the field
 * <TT_SHIFT, TT_MASK, TT_BASE> in register TT_REG. Returns -1 on SPD
 * read failure. */
2060 unsigned clocks, old_clocks;
2063 u32 spd_device = ctrl->channel0[i];
2065 if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
2066 spd_device = ctrl->channel1[i];
2069 value = spd_read_byte(spd_device, SPD_TT); //already in 1/4 ns
2070 if (value < 0) return -1;
/* Round up to whole memory clocks */
2072 clocks = (value + param->divisor -1)/param->divisor;
2073 if (clocks < TT_MIN) {
2077 if (clocks > TT_MAX) {
/* NOTE(review): message lacks a trailing '\n' — cosmetic only */
2078 printk(BIOS_INFO, "warning spd byte : %x = %x > TT_MAX: %x, setting TT_MAX", SPD_TT, value, TT_MAX);
2082 dtl = pci_read_config32(ctrl->f2, TT_REG);
2084 old_clocks = ((dtl >> TT_SHIFT) & TT_MASK) + TT_BASE;
2085 if (old_clocks >= clocks) { //some one did it?
2086 // clocks = old_clocks;
2089 dtl &= ~(TT_MASK << TT_SHIFT);
2090 dtl |= ((clocks - TT_BASE) << TT_SHIFT);
2091 pci_write_config32(ctrl->f2, TT_REG, dtl);
/* tRCD (RAS-to-CAS delay): thin wrapper over the quarter-ns helper */
2095 static int update_dimm_Trcd(const struct mem_controller *ctrl,
2096 const struct mem_param *param, int i, long dimm_mask)
2098 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_LOW, SPD_TRCD, DTL_TRCD_SHIFT, DTL_TRCD_MASK, DTL_TRCD_BASE, DTL_TRCD_MIN, DTL_TRCD_MAX);
/* tRRD (activate-to-activate, different banks): quarter-ns helper wrapper */
2101 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask)
2103 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_LOW, SPD_TRRD, DTL_TRRD_SHIFT, DTL_TRRD_MASK, DTL_TRRD_BASE, DTL_TRRD_MIN, DTL_TRRD_MAX);
2106 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask)
/* tRAS is stored in SPD as whole nanoseconds, so it cannot use the
 * quarter-ns helper: scale to 1/4 ns first, then convert to clocks,
 * clamp, and raise the field in DRAM Timing Low. Returns -1 on SPD
 * read failure. */
2108 unsigned clocks, old_clocks;
2111 u32 spd_device = ctrl->channel0[i];
2113 if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
2114 spd_device = ctrl->channel1[i];
2117 value = spd_read_byte(spd_device, SPD_TRAS); //in 1 ns
2118 if (value < 0) return -1;
2119 printk_raminit("update_dimm_Tras: 0 value= %08x\n", value);
2121 value <<= 2; //convert it to in 1/4ns
2124 printk_raminit("update_dimm_Tras: 1 value= %08x\n", value);
2126 clocks = (value + param->divisor - 1)/param->divisor;
2127 printk_raminit("update_dimm_Tras: divisor= %08x\n", param->divisor);
2128 printk_raminit("update_dimm_Tras: clocks= %08x\n", clocks);
2129 if (clocks < DTL_TRAS_MIN) {
2130 clocks = DTL_TRAS_MIN;
2132 if (clocks > DTL_TRAS_MAX) {
2135 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
2136 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
2137 if (old_clocks >= clocks) { // someone did it?
2140 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
2141 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
2142 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* tRP (row precharge time): quarter-ns helper wrapper */
2146 static int update_dimm_Trp(const struct mem_controller *ctrl,
2147 const struct mem_param *param, int i, long dimm_mask)
2149 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_LOW, SPD_TRP, DTL_TRP_SHIFT, DTL_TRP_MASK, DTL_TRP_BASE, DTL_TRP_MIN, DTL_TRP_MAX);
2153 static int update_dimm_Trtp(const struct mem_controller *ctrl,
2154 const struct mem_param *param, int i, struct mem_info *meminfo)
/* tRTP (read-to-precharge): the register encoding differs by burst length,
 * so bias base/min/max by `offset` before delegating to the helper. */
2156 /* need to figure if it is 32 byte burst or 64 bytes burst */
2158 if (!meminfo->is_Width128) {
2160 dword = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
/* 32-byte bursts use the unbiased encoding; 64-byte bursts add one */
2161 if ((dword & DCL_BurstLength32)) offset = 0;
2163 return update_dimm_TT_1_4(ctrl, param, i, meminfo->dimm_mask, DRAM_TIMING_LOW, SPD_TRTP, DTL_TRTP_SHIFT, DTL_TRTP_MASK, DTL_TRTP_BASE+offset, DTL_TRTP_MIN+offset, DTL_TRTP_MAX+offset);
/* tWR (write recovery time): quarter-ns helper wrapper */
2167 static int update_dimm_Twr(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask)
2169 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_LOW, SPD_TWR, DTL_TWR_SHIFT, DTL_TWR_MASK, DTL_TWR_BASE, DTL_TWR_MIN, DTL_TWR_MAX);
2173 static int update_dimm_Tref(const struct mem_controller *ctrl,
2174 const struct mem_param *param, int i, long dimm_mask)
/* Program the refresh-rate field in DRAM Timing High from SPD byte
 * SPD_TREF; only rewrites the register if the value actually changes. */
2176 uint32_t dth, dth_old;
2178 u32 spd_device = ctrl->channel0[i];
2180 if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
2181 spd_device = ctrl->channel1[i];
2184 value = spd_read_byte(spd_device, SPD_TREF); // 0: 15.625us, 1: 3.9us 2: 7.8 us....
2185 if (value < 0) return -1;
2193 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
2196 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
2197 dth |= (value << DTH_TREF_SHIFT);
/* Avoid a redundant config write when nothing changed */
2198 if (dth_old != dth) {
2199 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
2204 static void set_4RankRDimm(const struct mem_controller *ctrl,
2205 const struct mem_param *param, struct mem_info *meminfo)
/* Set DCH_FourRankRDimm in DRAM Config High when any populated registered
 * DIMM is quad-rank (compiled in only with CONFIG_QRANK_DIMM_SUPPORT). */
2207 #if CONFIG_QRANK_DIMM_SUPPORT
2210 long dimm_mask = meminfo->dimm_mask;
/* Only applies to registered DIMMs */
2213 if (!(meminfo->is_registered)) return;
2217 for (i = 0; i < DIMM_SOCKETS; i++) {
2218 if (!(dimm_mask & (1 << i))) {
2222 if (meminfo->sz[i].rank == 4) {
2230 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2231 dch |= DCH_FourRankRDimm;
2232 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2237 static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl,
2238 struct mem_info *meminfo)
/* Derive per-DIMM attribute bitmasks from SPD and the previously computed
 * sizes: x4/x16 device width, single-rank, and 1KB-page (10 column bits),
 * storing them into meminfo for later timing/termination decisions. */
2244 uint32_t mask_single_rank;
2245 uint32_t mask_page_1k;
2247 #if CONFIG_QRANK_DIMM_SUPPORT
2251 long dimm_mask = meminfo->dimm_mask;
2256 mask_single_rank = 0;
2259 for (i = 0; i < DIMM_SOCKETS; i++) {
2260 u32 spd_device = ctrl->channel0[i];
2261 if (!(dimm_mask & (1 << i))) {
2262 if (dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
2263 spd_device = ctrl->channel1[i];
2269 if (meminfo->sz[i].rank == 1) {
2270 mask_single_rank |= 1<<i;
/* 10 column address bits => 1KB page size */
2273 if (meminfo->sz[i].col==10) {
2274 mask_page_1k |= 1<<i;
2278 value = spd_read_byte(spd_device, SPD_PRI_WIDTH);
2280 #if CONFIG_QRANK_DIMM_SUPPORT
2281 rank = meminfo->sz[i].rank;
2286 #if CONFIG_QRANK_DIMM_SUPPORT
/* Quad-rank DIMMs occupy the paired upper chip-select slot too */
2288 mask_x4 |= 1<<(i+2);
2291 } else if (value==16) {
2293 #if CONFIG_QRANK_DIMM_SUPPORT
2295 mask_x16 |= 1<<(i+2);
2302 meminfo->x4_mask= mask_x4;
2303 meminfo->x16_mask = mask_x16;
2305 meminfo->single_rank_mask = mask_single_rank;
2306 meminfo->page_1k_mask = mask_page_1k;
/* Program the x4-DIMM indicator bits in DRAM Config Low from the
 * previously computed x4_mask (low 4 bits only). */
2313 static void set_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2316 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2317 dcl &= ~(DCL_X4Dimm_MASK<<DCL_X4Dimm_SHIFT);
2318 dcl |= ((meminfo->x4_mask) & 0xf) << (DCL_X4Dimm_SHIFT);
2319 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Count the set bits in the low (2 * DIMM_SOCKETS) bits of dimm_mask. */
2323 static int count_ones(uint32_t dimm_mask)
2328 for (index = 0; index < (2 * DIMM_SOCKETS); index++, dimm_mask >>= 1) {
2329 if (dimm_mask & 1) {
2337 static void set_DramTerm(const struct mem_controller *ctrl,
2338 const struct mem_param *param, struct mem_info *meminfo)
/* Select the on-die termination (ODT) strength in DRAM Config Low based on
 * memory speed and population; SO-DIMM boards (DIMM_SUPPORT 0x0204)
 * override to 150 ohms. */
2344 if (param->divisor == 100) { //DDR2 800
2345 if (meminfo->is_Width128) {
2346 if (count_ones(meminfo->dimm_mask & 0x0f)==2) {
2354 #if CONFIG_DIMM_SUPPORT == 0x0204
2355 odt = 0x2; /* 150 ohms */
2358 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2359 dcl &= ~(DCL_DramTerm_MASK<<DCL_DramTerm_SHIFT);
2360 dcl |= (odt & DCL_DramTerm_MASK) << (DCL_DramTerm_SHIFT);
2361 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2364 static void set_ecc(const struct mem_controller *ctrl,
2365 const struct mem_param *param, struct mem_info *meminfo)
/* Enable ECC when the northbridge supports it, the CMOS/build option
 * allows it, and every populated DIMM advertises ECC in SPD byte
 * SPD_DIMM_CONF_TYPE; otherwise clear DCL_DimmEccEn and meminfo->is_ecc. */
2370 uint32_t dcl, nbcap;
2371 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
2372 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2373 dcl &= ~DCL_DimmEccEn;
2374 if (nbcap & NBCAP_ECC) {
2375 dcl |= DCL_DimmEccEn;
2377 #ifdef CMOS_VSTART_ECC_memory
/* User-facing CMOS option can veto ECC even when hardware supports it */
2378 if (read_option(ECC_memory, 1) == 0) {
2379 dcl &= ~DCL_DimmEccEn;
2381 #else // CMOS_VSTART_ECC_memory not defined
2382 #if !CONFIG_ECC_MEMORY
2383 dcl &= ~DCL_DimmEccEn;
2386 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2388 meminfo->is_ecc = 1;
2389 if (!(dcl & DCL_DimmEccEn)) {
2390 meminfo->is_ecc = 0;
2391 printk(BIOS_DEBUG, "set_ecc: ECC disabled\n");
2392 return; // already disabled the ECC, so don't need to read SPD any more
2395 for (i = 0; i < DIMM_SOCKETS; i++) {
2396 u32 spd_device = ctrl->channel0[i];
2397 if (!(meminfo->dimm_mask & (1 << i))) {
2398 if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
2399 spd_device = ctrl->channel1[i];
2400 printk(BIOS_DEBUG, "set_ecc spd_device: 0x%x\n", spd_device);
/* Fix: read from spd_device (handles channel-B-only DIMMs); the old code
 * always read ctrl->channel0[i], which is empty in that configuration. */
2406 value = spd_read_byte(spd_device, SPD_DIMM_CONF_TYPE);
/* Any non-ECC module forces ECC off for the whole controller */
2408 if (!(value & SPD_DIMM_CONF_TYPE_ECC)) {
2409 dcl &= ~DCL_DimmEccEn;
2410 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2411 meminfo->is_ecc = 0;
/* tWTR (internal write-to-read delay): quarter-ns helper wrapper, but in
 * DRAM Timing High rather than Timing Low. */
2419 static int update_dimm_Twtr(const struct mem_controller *ctrl,
2420 const struct mem_param *param, int i, long dimm_mask)
2422 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_HIGH, SPD_TWTR, DTH_TWTR_SHIFT, DTH_TWTR_MASK, DTH_TWTR_BASE, DTH_TWTR_MIN, DTH_TWTR_MAX);
2425 static void set_TT(const struct mem_controller *ctrl,
2426 const struct mem_param *param, unsigned TT_REG, unsigned TT_SHIFT,
2427 unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX,
2428 unsigned val, const char *str)
/* Unconditionally program a timing field <TT_SHIFT, TT_MASK, TT_BASE> of
 * register TT_REG with `val` clocks; `str` names the field for the
 * out-of-range error path. */
2432 if ((val < TT_MIN) || (val > TT_MAX)) {
2433 printk(BIOS_ERR, "%s", str);
2437 reg = pci_read_config32(ctrl->f2, TT_REG);
2438 reg &= ~(TT_MASK << TT_SHIFT);
2439 reg |= ((val - TT_BASE) << TT_SHIFT);
2440 pci_write_config32(ctrl->f2, TT_REG, reg);
/* Program TrwtTO (read-to-write turnaround) from the speed-grade table */
2445 static void set_TrwtTO(const struct mem_controller *ctrl,
2446 const struct mem_param *param)
2448 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRWTTO_SHIFT, DTH_TRWTTO_MASK,DTH_TRWTTO_BASE, DTH_TRWTTO_MIN, DTH_TRWTTO_MAX, param->TrwtTO, "TrwtTO");
/* Program Twrrd (write-to-read turnaround) from the speed-grade table */
2452 static void set_Twrrd(const struct mem_controller *ctrl, const struct mem_param *param)
2454 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRRD_SHIFT, DTH_TWRRD_MASK,DTH_TWRRD_BASE, DTH_TWRRD_MIN, DTH_TWRRD_MAX, param->Twrrd, "Twrrd");
/* Program Twrwr (write-to-write turnaround) from the speed-grade table */
2458 static void set_Twrwr(const struct mem_controller *ctrl, const struct mem_param *param)
2460 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRWR_SHIFT, DTH_TWRWR_MASK,DTH_TWRWR_BASE, DTH_TWRWR_MIN, DTH_TWRWR_MAX, param->Twrwr, "Twrwr");
/* Program Trdrd (read-to-read turnaround) from the speed-grade table */
2463 static void set_Trdrd(const struct mem_controller *ctrl, const struct mem_param *param)
2465 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRDRD_SHIFT, DTH_TRDRD_MASK,DTH_TRDRD_BASE, DTH_TRDRD_MIN, DTH_TRDRD_MAX, param->Trdrd, "Trdrd");
/* Program the DCQ bypass maximum in DRAM Config High from the table */
2468 static void set_DcqBypassMax(const struct mem_controller *ctrl, const struct mem_param *param)
2470 set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_DcqBypassMax_SHIFT, DCH_DcqBypassMax_MASK,DCH_DcqBypassMax_BASE, DCH_DcqBypassMax_MIN, DCH_DcqBypassMax_MAX, param->DcqByPassMax, "DcqBypassMax"); // value need to be in CMOS
2473 static void set_Tfaw(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
/* Program the four-activate window (tFAW) in DRAM Config High; the clock
 * count depends on memory speed and on whether any DIMM uses 1KB pages. */
2475 static const uint8_t faw_1k[] = {8, 10, 13, 14};
2476 static const uint8_t faw_2k[] = {10, 14, 17, 18};
2477 unsigned memclkfreq_index;
/* dch_memclk doubles as an index 0..3 for 200/266/333/400 MHz */
2481 memclkfreq_index = param->dch_memclk;
2483 if (meminfo->page_1k_mask != 0) { //1k page
2484 faw = faw_1k[memclkfreq_index];
2486 faw = faw_2k[memclkfreq_index];
2489 set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_FourActWindow_SHIFT, DCH_FourActWindow_MASK, DCH_FourActWindow_BASE, DCH_FourActWindow_MIN, DCH_FourActWindow_MAX, faw, "FourActWindow");
2492 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
/* Program MaxAsyncLat in DRAM Config High. The value used here is a
 * placeholder pending DQS receiver-enable training results (see FIXME). */
2498 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2499 dch &= ~(DCH_MaxAsyncLat_MASK << DCH_MaxAsyncLat_SHIFT);
2501 //FIXME: We need to use Max of DqsRcvEnDelay + 6ns here: After training and get that from index reg 0x10, 0x13, 0x16, 0x19, 0x30, 0x33, 0x36, 0x39
2505 dch |= ((async_lat - DCH_MaxAsyncLat_BASE) << DCH_MaxAsyncLat_SHIFT);
2506 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2509 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
/* Enable 2T command timing (slow access mode) in DRAM Config High for
 * heavily loaded unbuffered-DIMM configurations. */
2510 static void set_SlowAccessMode(const struct mem_controller *ctrl)
2514 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2518 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2523 DRAM_OUTPUT_DRV_COMP_CTRL 0, 0x20
2524 DRAM_ADDR_TIMING_CTRL 04, 0x24
/* Program the Output Driver Compensation Control (index 0x00/0x20) and
 * Address Timing Control (index 0x04/0x24) registers through the indexed
 * F2:0x98/0x9C access mechanism. The values (`dword` for driver
 * compensation, `dwordx` for address timing) are selected per the
 * compile-time DIMM type (CONFIG_DIMM_SUPPORT), the memory clock, the
 * channel population, and the rank/width masks gathered from SPD.
 * NOTE(review): `dword` is assigned in lines not visible in this view. */
2526 static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *meminfo)
2530 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2531 unsigned SlowAccessMode = 0;
2534 #if CONFIG_DIMM_SUPPORT==0x0104 /* DDR2 and REG */
2535 long dimm_mask = meminfo->dimm_mask & 0x0f;
2538 dwordx = 0x002f0000;
2539 switch (meminfo->memclk_set) {
2540 case DCH_MemClkFreq_266MHz:
2541 if ( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
2542 dwordx = 0x002f2700;
2545 case DCH_MemClkFreq_333MHz:
2546 if ( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
2547 if ((meminfo->single_rank_mask & 0x03)!=0x03) { //any double rank there?
2548 dwordx = 0x002f2f00;
2552 case DCH_MemClkFreq_400MHz:
2553 dwordx = 0x002f3300;
/* SO-DIMM (socket S1g1) variant: address-timing values keyed off rank and
 * device-width (x4/x8/x16) combinations. */
2559 #if CONFIG_DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */
2561 dwordx = 0x002F2F00;
2563 switch (meminfo->memclk_set) {
2564 case DCH_MemClkFreq_200MHz: /* nothing to be set here */
2566 case DCH_MemClkFreq_266MHz:
2567 if ((meminfo->single_rank_mask == 0)
2568 && (meminfo->x4_mask == 0) && (meminfo->x16_mask))
2569 dwordx = 0x002C2C00; /* Double rank x8 */
2570 /* else SRx16, SRx8, DRx16 == 0x002F2F00 */
2572 case DCH_MemClkFreq_333MHz:
2573 if ((meminfo->single_rank_mask == 1)
2574 && (meminfo->x16_mask == 1)) /* SR x16 */
2575 dwordx = 0x00272700;
2576 else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)
2577 && (meminfo->single_rank_mask == 0)) { /* DR x8 */
2579 dwordx = 0x00002800;
2580 } else { /* SR x8, DR x16 */
2581 dwordx = 0x002A2A00;
2584 case DCH_MemClkFreq_400MHz:
2585 if ((meminfo->single_rank_mask == 1)
2586 && (meminfo->x16_mask == 1)) /* SR x16 */
2587 dwordx = 0x00292900;
2588 else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)
2589 && (meminfo->single_rank_mask == 0)) { /* DR x8 */
2591 dwordx = 0x00002A00;
2592 } else { /* SR x8, DR x16 */
2593 dwordx = 0x002A2A00;
/* Unbuffered-DIMM variant: also decides whether 2T addressing
 * (SlowAccessMode) is required for heavier loads. */
2599 #if CONFIG_DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */
2600 long dimm_mask = meminfo->dimm_mask & 0x0f;
2601 /* for UNBUF DIMM */
2603 dwordx = 0x002f2f00;
2604 switch (meminfo->memclk_set) {
2605 case DCH_MemClkFreq_200MHz:
2606 if (dimm_mask == 0x03) {
2611 case DCH_MemClkFreq_266MHz:
2612 if (dimm_mask == 0x03) {
2615 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
2616 switch (meminfo->single_rank_mask) {
2618 dwordx = 0x00002f00; //x8 single Rank
2621 dwordx = 0x00342f00; //x8 double Rank
2624 dwordx = 0x00372f00; //x8 single Rank and double Rank mixed
2626 } else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
2627 dwordx = 0x00382f00; //x8 Double Rank and x16 single Rank mixed
2628 } else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
2629 dwordx = 0x00382f00; //x16 single Rank and x8 double Rank mixed
2633 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x00) && ((meminfo->single_rank_mask == 0x01)||(meminfo->single_rank_mask == 0x02))) { //x8 single rank
2634 dwordx = 0x002f2f00;
2636 dwordx = 0x002b2f00;
2640 case DCH_MemClkFreq_333MHz:
2641 dwordx = 0x00202220;
2642 if (dimm_mask == 0x03) {
2645 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
2646 switch (meminfo->single_rank_mask) {
2648 dwordx = 0x00302220; //x8 single Rank
2651 dwordx = 0x002b2220; //x8 double Rank
2654 dwordx = 0x002a2220; //x8 single Rank and double Rank mixed
2656 } else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
2657 dwordx = 0x002c2220; //x8 Double Rank and x16 single Rank mixed
2658 } else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
2659 dwordx = 0x002c2220; //x16 single Rank and x8 double Rank mixed
2663 case DCH_MemClkFreq_400MHz:
2664 dwordx = 0x00202520;
2666 if (dimm_mask == 0x03) {
/* Debug dump of the masks and the two register values chosen above. */
2674 printk_raminit("\tdimm_mask = %08x\n", meminfo->dimm_mask);
2675 printk_raminit("\tx4_mask = %08x\n", meminfo->x4_mask);
2676 printk_raminit("\tx16_mask = %08x\n", meminfo->x16_mask);
2677 printk_raminit("\tsingle_rank_mask = %08x\n", meminfo->single_rank_mask);
2678 printk_raminit("\tODC = %08x\n", dword);
2679 printk_raminit("\tAddr Timing= %08x\n", dwordx);
2682 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2683 if (SlowAccessMode) {
2684 set_SlowAccessMode(ctrl);
/* If only channel B is populated, write only the channel-B register
 * copies (indices 0x20/0x24); otherwise write channel A (0x00/0x04) and
 * mirror to channel B when running in 128-bit (dual-channel) width. */
2688 if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
2689 /* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
2690 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x20, dword);
2692 /* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
2693 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x24, dwordx);
2695 /* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
2696 pci_write_config32_index_wait(ctrl->f2, 0x98, 0, dword);
2697 if (meminfo->is_Width128) {
2698 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x20, dword);
2701 /* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
2702 pci_write_config32_index_wait(ctrl->f2, 0x98, 4, dwordx);
2703 if (meminfo->is_Width128) {
2704 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x24, dwordx);
/* Enable RDqsEn in the DRAM Config High register for socket type 0x10
 * (AM2): only needed for registered x8 devices, i.e. when neither the x4
 * nor the x16 mask is set. NOTE(review): the bit-set on `dch` happens in
 * a line elided from this view. */
2710 static void set_RDqsEn(const struct mem_controller *ctrl,
2711 const struct mem_param *param, struct mem_info *meminfo)
2713 #if CONFIG_CPU_SOCKET_TYPE==0x10
2714 //only need to set for reg and x8
2717 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2720 if ((!meminfo->x4_mask) && (!meminfo->x16_mask)) {
2724 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* Program the idle cycle limit (ILD_lmt field of DRAM Control Misc) to the
 * fixed value of 16 clocks, per AMD recommendation. */
2728 static void set_idle_cycle_limit(const struct mem_controller *ctrl,
2729 const struct mem_param *param)
2732 /* AMD says to Hardcode this */
2733 dcm = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
2734 dcm &= ~(DCM_ILD_lmt_MASK << DCM_ILD_lmt_SHIFT); /* clear the old field first */
2735 dcm |= DCM_ILD_lmt_16 << DCM_ILD_lmt_SHIFT;
2737 pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
/* Program RdWrQByp (read/write queue bypass count) in the DRAM Control
 * Misc register to the hardcoded encoded value 2 (range 0..3). */
2740 static void set_RdWrQByp(const struct mem_controller *ctrl,
2741 const struct mem_param *param)
2743 set_TT(ctrl, param, DRAM_CTRL_MISC, DCM_RdWrQByp_SHIFT, DCM_RdWrQByp_MASK,0, 0, 3, 2, "RdWrQByp");
/* Walk every populated DIMM socket and program the per-DIMM timing fields
 * (DRAM Timing Low/High registers) from SPD data, then program the
 * controller-wide timing, config, and misc registers.
 *
 * Returns the (possibly reduced) dimm_mask: a DIMM whose update_dimm_*
 * helper returns <= 0 is disabled via disable_dimm() at the dimm_err
 * label (label itself elided from this view). */
2746 static long spd_set_dram_timing(const struct mem_controller *ctrl,
2747 const struct mem_param *param,
2748 struct mem_info *meminfo)
2752 for (i = 0; i < DIMM_SOCKETS; i++) {
/* Skip sockets with no DIMM on either channel A or channel B. */
2754 if (!(meminfo->dimm_mask & (1 << i)) &&
2755 !(meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) ) {
2758 printk_raminit("spd_set_dram_timing dimm socket: %08x\n", i);
2759 /* DRAM Timing Low Register */
2760 printk_raminit("\ttrc\n");
2761 if ((rc = update_dimm_Trc (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2763 printk_raminit("\ttrcd\n");
2764 if ((rc = update_dimm_Trcd(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2766 printk_raminit("\ttrrd\n");
2767 if ((rc = update_dimm_Trrd(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2769 printk_raminit("\ttras\n");
2770 if ((rc = update_dimm_Tras(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2772 printk_raminit("\ttrp\n");
2773 if ((rc = update_dimm_Trp (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2775 printk_raminit("\ttrtp\n");
2776 if ((rc = update_dimm_Trtp(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
2778 printk_raminit("\ttwr\n");
2779 if ((rc = update_dimm_Twr (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2781 /* DRAM Timing High Register */
2782 printk_raminit("\ttref\n");
2783 if ((rc = update_dimm_Tref(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2785 printk_raminit("\ttwtr\n");
2786 if ((rc = update_dimm_Twtr(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2788 printk_raminit("\ttrfc\n");
2789 if ((rc = update_dimm_Trfc(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
2791 /* DRAM Config Low */
/* dimm_err path: a timing update failed for this socket; drop the DIMM
 * from the mask and continue with the remaining sockets. */
2795 printk(BIOS_DEBUG, "spd_set_dram_timing dimm_err!\n");
2799 meminfo->dimm_mask = disable_dimm(ctrl, i, meminfo);
2802 get_extra_dimm_mask(ctrl, meminfo); // will be used by RDqsEn and dimm_x4
2803 /* DRAM Timing Low Register */
2805 /* DRAM Timing High Register */
2806 set_TrwtTO(ctrl, param);
2807 set_Twrrd (ctrl, param);
2808 set_Twrwr (ctrl, param);
2809 set_Trdrd (ctrl, param);
2811 set_4RankRDimm(ctrl, param, meminfo);
2813 /* DRAM Config High */
2814 set_Tfaw(ctrl, param, meminfo);
2815 set_DcqBypassMax(ctrl, param);
2816 set_max_async_latency(ctrl, param);
2817 set_RDqsEn(ctrl, param, meminfo);
2819 /* DRAM Config Low */
2820 set_ecc(ctrl, param, meminfo);
2821 set_dimm_x4(ctrl, param, meminfo);
2822 set_DramTerm(ctrl, param, meminfo);
2824 /* DRAM Control Misc */
2825 set_idle_cycle_limit(ctrl, param);
2826 set_RdWrQByp(ctrl, param);
2828 return meminfo->dimm_mask;
/* Detect the DIMMs on this memory controller via SPD and program all
 * SPD-derived registers: detect -> dual-channel check -> ram size ->
 * unbuffered handling -> memclk selection -> per-DIMM timing -> dimm order.
 * Each spd_* step returns an updated dimm_mask; -1 means an unrecoverable
 * SPD error and falls through to die() (error label elided from this view).
 *
 * Fix: the three `¶mx` tokens were mojibake -- `&paramx` had its
 * leading `&para` swallowed as an HTML entity; restored to `&paramx`. */
2831 static void sdram_set_spd_registers(const struct mem_controller *ctrl,
2832 struct sys_info *sysinfo)
2834 struct spd_set_memclk_result result;
2835 const struct mem_param *param;
2836 struct mem_param paramx;
2837 struct mem_info *meminfo;
2839 if (!sysinfo->ctrl_present[ctrl->node_id]) {
2843 meminfo = &sysinfo->meminfo[ctrl->node_id];
2845 printk(BIOS_DEBUG, "sdram_set_spd_registers: paramx :%p\n", &paramx);
2847 activate_spd_rom(ctrl);
2848 meminfo->dimm_mask = spd_detect_dimms(ctrl);
2850 printk_raminit("sdram_set_spd_registers: dimm_mask=0x%x\n", meminfo->dimm_mask);
/* Bail out early if no DIMM was detected on either channel. */
2852 if (!(meminfo->dimm_mask & ((1 << 2*DIMM_SOCKETS) - 1)))
2854 printk(BIOS_DEBUG, "No memory for this cpu\n");
2857 meminfo->dimm_mask = spd_enable_2channels(ctrl, meminfo);
2858 printk_raminit("spd_enable_2channels: dimm_mask=0x%x\n", meminfo->dimm_mask);
2859 if (meminfo->dimm_mask == -1)
2862 meminfo->dimm_mask = spd_set_ram_size(ctrl, meminfo);
2863 printk_raminit("spd_set_ram_size: dimm_mask=0x%x\n", meminfo->dimm_mask);
2864 if (meminfo->dimm_mask == -1)
2867 meminfo->dimm_mask = spd_handle_unbuffered_dimms(ctrl, meminfo);
2868 printk_raminit("spd_handle_unbuffered_dimms: dimm_mask=0x%x\n", meminfo->dimm_mask);
2869 if (meminfo->dimm_mask == -1)
2872 result = spd_set_memclk(ctrl, meminfo);
2873 param = result.param;
2874 meminfo->dimm_mask = result.dimm_mask;
2875 printk_raminit("spd_set_memclk: dimm_mask=0x%x\n", meminfo->dimm_mask);
2876 if (meminfo->dimm_mask == -1)
2879 //store memclk set to sysinfo, in case we need to rebuild param again
2880 meminfo->memclk_set = param->dch_memclk;
/* Take a writable copy of the selected parameter set so the divisor can
 * be refined below. */
2882 memcpy(&paramx, param, sizeof(paramx));
2884 paramx.divisor = get_exact_divisor(param->dch_memclk, paramx.divisor);
2886 meminfo->dimm_mask = spd_set_dram_timing(ctrl, &paramx, meminfo);
2887 printk_raminit("spd_set_dram_timing: dimm_mask=0x%x\n", meminfo->dimm_mask);
2888 if (meminfo->dimm_mask == -1)
2891 order_dimms(ctrl, meminfo);
2895 /* Unrecoverable error reading SPD data */
2896 die("Unrecoverable error reading SPD data. No qualified DIMMs?");
2900 #define TIMEOUT_LOOPS 300000
2902 #include "raminit_f_dqs.c"
2904 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* Hoist the memory that would be shadowed by the PCI hole above 4 GiB.
 * `carry_over` is the amount (in KiB) between hole_startk and 4 GiB; the
 * DRAM base/limit registers of node i and of every node above it are
 * shifted up by that amount on all controllers, and the DRAM Hole Address
 * Register (F1:0xF0) is programmed on node i. Returns a value from an
 * elided line (presumably the new carry-over/end -- TODO confirm). */
2905 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2908 uint32_t carry_over;
2910 uint32_t base, limit;
2915 carry_over = (4*1024*1024) - hole_startk; /* KiB between hole start and 4 GiB */
/* Shift the base/limit of every node above node i upward by carry_over.
 * Base/limit registers hold address[39:24] in bits [31:16], i.e. units of
 * 16 MiB; carry_over is in KiB, hence the << 2 (KiB -> reg units via >>14
 * then <<16). */
2917 for (ii=controllers - 1;ii>i;ii--) {
2918 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2919 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) { /* skip if RE/WE not both set (range disabled) */
2922 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
2923 limit += (carry_over << 2 );
2924 base += (carry_over << 2 );
2925 for (j = 0; j < controllers; j++) {
2926 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit);
2927 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base );
/* Node i itself keeps its base but its limit grows by the hoisted amount. */
2930 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2931 limit += (carry_over << 2);
2932 for (j = 0; j < controllers; j++) {
2933 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit);
2936 base = pci_read_config32(dev, 0x40 + (i << 3));
2937 basek = (base & 0xffff0000) >> 2; /* reg units -> KiB */
2938 if (basek == hole_startk) {
2939 //don't need set memhole here, because hole off set will be 0, overflow
2940 //so need to change base reg instead, new basek will be 4*1024*1024
2942 base |= (4*1024*1024)<<2;
2943 for (j = 0; j < controllers; j++) {
2944 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
/* Compose the DRAM Hole Address Register: hole start address [31:24],
 * hole offset [15:8], plus enable bit(s) in an elided line. */
2947 hoist = /* hole start address */
2948 ((hole_startk << 10) & 0xff000000) +
2949 /* hole address to memory controller address */
2950 (((basek + carry_over) >> 6) & 0x0000ff00) +
2953 pci_write_config32(dev, 0xf0, hoist);
/* Set up the hardware memory hole below 4 GiB: pick the hole start
 * (4 GiB - CONFIG_HW_MEM_HOLE_SIZEK), optionally nudge it down so it does
 * not land exactly on a node boundary, find the node whose DRAM range
 * contains it, hoist that node's memory, and reprogram TOP_MEM. */
2959 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2962 uint32_t hole_startk;
2965 hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK; /* in KiB */
2967 printk_raminit("Handling memory hole at 0x%08x (default)\n", hole_startk);
2968 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
2969 /* We need to double check if the hole_startk is valid, if it is equal
2970 to basek, we need to decrease it some */
2972 for (i=0; i<controllers; i++) {
2975 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2976 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) { /* range not enabled -> skip */
2979 base_k = (base & 0xffff0000) >> 2; /* reg units -> KiB */
2980 if (base_k == hole_startk) {
2981 /* decrease mem hole startk to make sure it is
2982 on middle of previous node */
2983 hole_startk -= (base_k - basek_pri) >> 1;
2984 break; //only one hole
2988 printk_raminit("Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
2990 /* find node index that need do set hole */
2991 for (i=0; i < controllers; i++) {
2992 uint32_t base, limit;
2993 unsigned base_k, limit_k;
2994 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2995 if ((base & ((1 << 1) | (1 << 0))) != ((1 << 1) | (1 << 0))) { /* range not enabled -> skip */
2998 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2999 base_k = (base & 0xffff0000) >> 2;
3000 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2; /* limit reg is inclusive; +1 unit, then -> KiB */
3001 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
3003 hoist_memory(controllers, ctrl, hole_startk, i);
3004 end_k = memory_end_k(ctrl, controllers);
3005 set_top_mem(end_k, hole_startk);
3006 break; //only one hole
3012 #if CONFIG_HAVE_ACPI_RESUME == 1
3013 #include "exit_from_self.c"
/* Bring the DRAM on all controllers up: start the memory clocks, set
 * misc/driver timing, trigger InitDram and poll for completion, wait for
 * memory clear, optionally set up the HW memory hole, record TOM/TOM2 in
 * sysinfo, then run DQS receiver-enable/position training per the
 * CONFIG_MEM_TRAIN_SEQ strategy. On ACPI S3 resume the init sequence is
 * bypassed via exit_from_self(). */
3016 static void sdram_enable(int controllers, const struct mem_controller *ctrl,
3017 struct sys_info *sysinfo)
3020 #if CONFIG_HAVE_ACPI_RESUME == 1
3021 int suspend = acpi_is_wakeup_early();
3026 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3027 unsigned cpu_f0_f1[8];
3028 /* FIXME: How about 32 node machine later? */
3031 printk(BIOS_DEBUG, "sdram_enable: tsc0[8]: %p", &tsc0[0]);
3035 /* Error if I don't have memory */
3036 if (memory_end_k(ctrl, controllers) == 0) {
3040 /* Before enabling memory start the memory clocks */
3041 for (i = 0; i < controllers; i++) {
3043 if (!sysinfo->ctrl_present[ i ])
3045 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
3047 /* if no memory installed, disable the interface */
3048 if (sysinfo->meminfo[i].dimm_mask==0x00){
3049 dch |= DCH_DisDramInterface;
3050 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
3053 dch |= DCH_MemClkFreqVal;
3054 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
3055 /* address timing and Output driver comp Control */
3056 set_misc_timing(ctrl+i, sysinfo->meminfo+i );
3060 /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
3061 memreset(controllers, ctrl);
3063 /* lets override the rest of the routine */
3065 printk(BIOS_DEBUG, "Wakeup!\n");
3066 exit_from_self(controllers, ctrl, sysinfo);
3067 printk(BIOS_DEBUG, "Mem running !\n");
/* Per-controller: enable ECC/ChipKill if the DIMMs support it, apply the
 * rev F0/F1 DqsRcvEnTrain workaround, and kick off DRAM initialization. */
3071 for (i = 0; i < controllers; i++) {
3073 if (!sysinfo->ctrl_present[ i ])
3075 /* Skip everything if I don't have any memory on this controller */
3076 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
3077 if (!(dch & DCH_MemClkFreqVal)) {
3082 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
3083 if (dcl & DCL_DimmEccEn) {
3085 printk(BIOS_SPEW, "ECC enabled\n");
3086 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
3088 if (dcl & DCL_Width128) {
3089 mnc |= MNC_CHIPKILL_EN; /* ChipKill requires 128-bit (dual-channel) width */
3091 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
3094 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3095 cpu_f0_f1[i] = is_cpu_pre_f2_in_bsp(i);
3097 //Rev F0/F1 workaround
3099 /* Set the DqsRcvEnTrain bit */
3100 dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
3101 dword |= DC_DqsRcvEnTrain;
3102 pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
3108 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
3109 dcl |= DCL_InitDram; /* start hardware DRAM initialization */
3110 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
/* Poll each controller until InitDram clears (or TIMEOUT_LOOPS elapses),
 * then wait for the hardware memory-clear to finish. */
3113 for (i = 0; i < controllers; i++) {
3115 if (!sysinfo->ctrl_present[ i ])
3117 /* Skip everything if I don't have any memory on this controller */
3118 if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
3120 printk(BIOS_DEBUG, "Initializing memory: ");
3123 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
3125 if ((loops & 1023) == 0) {
3126 printk(BIOS_DEBUG, ".");
3128 } while(((dcl & DCL_InitDram) != 0) && (loops < TIMEOUT_LOOPS));
3129 if (loops >= TIMEOUT_LOOPS) {
3130 printk(BIOS_DEBUG, " failed\n");
3134 /* Wait until it is safe to touch memory */
3136 dcm = pci_read_config32(ctrl[i].f2, DRAM_CTRL_MISC);
3137 } while(((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ );
3139 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
/* Measure how long init took on this node (TSC delta vs. tsc0[i]),
 * presumably consumed later by the workaround path -- TODO confirm. */
3143 print_debug_dqs_tsc("\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
3144 print_debug_dqs_tsc("end tsc ", i, tsc.hi, tsc.lo, 2);
3146 if (tsc.lo<tsc0[i].lo) { /* borrow from the high word on low-word underflow */
3149 tsc.lo -= tsc0[i].lo;
3150 tsc.hi -= tsc0[i].hi;
3152 tsc0[i].lo = tsc.lo;
3153 tsc0[i].hi = tsc.hi;
3155 print_debug_dqs_tsc(" dtsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
3158 printk(BIOS_DEBUG, " done\n");
3161 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
3162 /* init hw mem hole here */
3163 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
3164 set_hw_mem_hole(controllers, ctrl);
3167 /* store tom to sysinfo, and it will be used by dqs_timing */
3171 msr = rdmsr(TOP_MEM);
3172 sysinfo->tom_k = ((msr.hi<<24) | (msr.lo>>8))>>2; /* bytes -> KiB */
3175 msr = rdmsr(TOP_MEM2);
3176 sysinfo->tom2_k = ((msr.hi<<24)| (msr.lo>>8))>>2; /* bytes -> KiB */
/* Mark every controller that has memory as needing DQS training (0x80). */
3179 for (i = 0; i < controllers; i++) {
3180 sysinfo->mem_trained[i] = 0;
3182 if (!sysinfo->ctrl_present[ i ])
3185 /* Skip everything if I don't have any memory on this controller */
3186 if (sysinfo->meminfo[i].dimm_mask==0x00)
3189 sysinfo->mem_trained[i] = 0x80; // mem need to be trained
/* DQS training: SEQ==0 trains all nodes at once here; SEQ==1/2 train
 * node-by-node below (SEQ==2 needs MTRRs so the test address is cached). */
3193 #if CONFIG_MEM_TRAIN_SEQ == 0
3194 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3195 dqs_timing(controllers, ctrl, tsc0, sysinfo);
3197 dqs_timing(controllers, ctrl, sysinfo);
3201 #if CONFIG_MEM_TRAIN_SEQ == 2
3202 /* need to enable mtrr, so dqs training could access the test address */
3203 setup_mtrr_dqs(sysinfo->tom_k, sysinfo->tom2_k);
3206 for (i = 0; i < controllers; i++) {
3207 /* Skip everything if I don't have any memory on this controller */
3208 if (sysinfo->mem_trained[i]!=0x80)
3211 dqs_timing(i, &ctrl[i], sysinfo, 1);
3213 #if CONFIG_MEM_TRAIN_SEQ == 1
3214 break; // only train the first node with ram
3218 #if CONFIG_MEM_TRAIN_SEQ == 2
3219 clear_mtrr_dqs(sysinfo->tom2_k);
3224 #if CONFIG_MEM_TRAIN_SEQ != 1
3225 wait_all_core0_mem_trained(sysinfo);
3230 void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
3231 const uint16_t *spd_addr)
3235 struct mem_controller *ctrl;
3236 for (i=0;i<controllers; i++) {
3239 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
3240 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
3241 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
3242 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
3244 if (spd_addr == (void *)0) continue;
3246 for (j=0;j<DIMM_SOCKETS;j++) {
3247 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
3248 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];