2 * This file is part of the coreboot project.
4 * Copyright (C) 2010 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 /* Description: Main memory controller system configuration for DDR 3 */
22 /* KNOWN ISSUES - ERRATA
24 * Trtp is not calculated correctly when the controller is in 64-bit mode, it
25 * is 1 busclock off. No fix planned. The controller is not ordinarily in
28 * 32 Byte burst not supported. No fix planned. The controller is not
29 * ordinarily in 64-bit mode.
31 * Trc precision does not use extra Jedec defined fractional component.
32 * Instead Trc (coarse) is rounded up to nearest 1 ns.
34 * Mini and Micro DIMM not supported. Only RDIMM, UDIMM, SO-DIMM defined types
38 static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
39 struct DCTStatStruc *pDCTstatA);
40 static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
41 struct DCTStatStruc *pDCTstatA);
42 static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
43 struct DCTStatStruc *pDCTstatA);
44 static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
45 struct DCTStatStruc *pDCTstatA);
46 static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
47 struct DCTStatStruc *pDCTstatA);
48 static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
49 struct DCTStatStruc *pDCTstat);
50 static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
51 struct DCTStatStruc *pDCTstat);
52 static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
53 struct DCTStatStruc *pDCTstatA);
54 static u8 NodePresent_D(u8 Node);
55 static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
56 struct DCTStatStruc *pDCTstatA);
57 static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
58 struct DCTStatStruc *pDCTstat, u8 dct);
59 static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
60 struct DCTStatStruc *pDCTstat, u8 dct);
61 static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
62 struct DCTStatStruc *pDCTstat, u8 dct);
63 static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
64 struct DCTStatStruc *pDCTstat);
65 static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
66 struct DCTStatStruc *pDCTstat, u8 dct);
67 static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
68 struct DCTStatStruc *pDCTstat, u8 dct);
69 static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
70 struct DCTStatStruc *pDCTstat, u8 dct);
71 static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
72 struct DCTStatStruc *pDCTstat, u8 dct);
73 static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
74 struct DCTStatStruc *pDCTstat, u8 dct);
75 static u16 Get_Fk_D(u8 k);
76 static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i);
77 static void mct_initDCT(struct MCTStatStruc *pMCTstat,
78 struct DCTStatStruc *pDCTstat);
79 static void mct_DramInit(struct MCTStatStruc *pMCTstat,
80 struct DCTStatStruc *pDCTstat, u8 dct);
81 static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
82 struct DCTStatStruc *pDCTstat, u8 dct);
83 static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat);
84 static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
85 struct DCTStatStruc *pDCTstat, u8 dct);
86 static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
87 struct DCTStatStruc *pDCTstat, u8 dct);
88 static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,\
89 struct DCTStatStruc *pDCTstat, u8 dct);
90 static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
91 struct DCTStatStruc *pDCTstat, u8 dct);
92 static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
93 struct DCTStatStruc *pDCTstat, u8 dct);
94 static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
95 struct DCTStatStruc *pDCTstat, u8 dct);
96 static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
97 struct DCTStatStruc *pDCTstat, u8 dct);
98 static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
99 struct DCTStatStruc *pDCTstat, u8 dct);
100 static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
101 struct DCTStatStruc *pDCTstat, u8 dct);
102 static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
103 struct DCTStatStruc *pDCTstat);
104 static void Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
105 u32 dev, u32 index_reg);
106 static void Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
107 u32 dev, u32 index_reg);
108 static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
109 u32 dev, u32 index_reg, u32 index);
110 static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
111 struct DCTStatStruc *pDCTstat);
112 static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat, u8 dct,
113 u32 dev, u32 index_reg, u32 index);
114 static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat,
115 struct DCTStatStruc *pDCTstat);
116 static void mct_init(struct MCTStatStruc *pMCTstat,
117 struct DCTStatStruc *pDCTstat);
118 static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
119 struct DCTStatStruc *pDCTstat);
120 static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
121 struct DCTStatStruc *pDCTstatA);
122 static void SetCSTriState(struct MCTStatStruc *pMCTstat,
123 struct DCTStatStruc *pDCTstat, u8 dct);
124 static void SetCKETriState(struct MCTStatStruc *pMCTstat,
125 struct DCTStatStruc *pDCTstat, u8 dct);
126 static void SetODTTriState(struct MCTStatStruc *pMCTstat,
127 struct DCTStatStruc *pDCTstat, u8 dct);
128 static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
129 struct DCTStatStruc *pDCTstat, u8 dct);
130 static u32 mct_NodePresent_D(void);
131 static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
132 struct DCTStatStruc *pDCTstatA);
133 static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
134 struct DCTStatStruc *pDCTstatA);
135 static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
136 struct DCTStatStruc *pDCTstat);
137 static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
138 struct DCTStatStruc *pDCTstat);
139 void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
140 struct DCTStatStruc *pDCTstat);
141 static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
142 struct DCTStatStruc *pDCTstat);
143 void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
144 struct DCTStatStruc *pDCTstat);
145 static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
146 struct DCTStatStruc *pDCTstatA);
147 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct);
148 static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
149 struct DCTStatStruc *pDCTstat, u8 dct);
150 static void ProgDramMRSReg_D(struct MCTStatStruc *pMCTstat,
151 struct DCTStatStruc *pDCTstat, u8 dct);
152 static void mct_DramInit_Sw_D(struct MCTStatStruc *pMCTstat,
153 struct DCTStatStruc *pDCTstat, u8 dct);
154 static u32 mct_DisDllShutdownSR(struct MCTStatStruc *pMCTstat,
155 struct DCTStatStruc *pDCTstat, u32 DramConfigLo, u8 dct);
157 static u32 mct_DramTermDyn_RDimm(struct MCTStatStruc *pMCTstat,
158 struct DCTStatStruc *pDCTstat, u8 dimm);
159 static u32 mct_SetDramConfigMisc2(struct DCTStatStruc *pDCTstat, u8 dct, u32 misc2);
160 static void mct_BeforeDQSTrainSamp(struct DCTStatStruc *pDCTstat);
161 static void mct_WriteLevelization_HW(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
162 static u8 Get_Latency_Diff(struct MCTStatStruc *pMCTstat,
163 struct DCTStatStruc *pDCTstat, u8 dct);
164 static void SyncSetting(struct DCTStatStruc *pDCTstat);
165 static u8 crcCheck(u8 smbaddr);
167 /*See mctAutoInitMCT header for index relationships to CL and T*/
/* Memory-clock frequency table indexed by speed code k (see Get_Fk_D());
 * values look like memclk rates in MHz (200/266/333/400/533), with index 0
 * unused — TODO confirm units against the mctAutoInitMCT header. */
168 static const u16 Table_F_k[] = {00,200,266,333,400,533 };
/* Bank-address encoding table; 0x3F entries appear to mark invalid/unused
 * combinations — NOTE(review): verify against SPDSetBanks_D usage. */
169 static const u8 Tab_BankAddr[] = {0x3F,0x01,0x09,0x3F,0x3F,0x11,0x0A,0x19,0x12,0x1A,0x21,0x22,0x23};
/* Per byte-lane-pair index offsets into the DCT additional-data space for
 * DQS receiver-enable delays (indexed by ByteLane >> 1 in
 * LoadDQSSigTmgRegs_D; last entry covers the ECC lane). */
170 static const u8 Table_DQSRcvEn_Offset[] = {0x00,0x01,0x10,0x11,0x2};
172 /****************************************************************************
173 Describe how platform maps MemClk pins to logical DIMMs. The MemClk pins
174 are identified based on BKDG definition of Fn2x88[MemClkDis] bitmap.
175 AGESA will base on this value to disable unused MemClk to save power.
177 If MEMCLK_MAPPING contains all zeroes, AGESA will use
178 default MemClkDis setting based on package type.
181 BKDG definition of Fn2x88[MemClkDis] bitmap for AM3 package is like below:
182 Bit AM3/S1g3 pin name
192 And platform has the following routing:
193 CS0 M[B,A]_CLK_H/L[4]
194 CS1 M[B,A]_CLK_H/L[2]
195 CS2 M[B,A]_CLK_H/L[3]
196 CS3 M[B,A]_CLK_H/L[5]
199 ; CS0 CS1 CS2 CS3 CS4 CS5 CS6 CS7
200 MEMCLK_MAPPING EQU 00010000b, 00000100b, 00001000b, 00100000b, 00000000b, 00000000b, 00000000b, 00000000b
203 /* Note: If you are not sure about the pin mappings at initial stage, we don't have to disable MemClk.
204 * Set entries in the tables all 0xFF. */
/* Per-package Fn2x88[MemClkDis] bitmaps, indexed by chip select (CS0..CS7),
 * as described in the comment block above.  0xFF = "do not disable" . */
205 static const u8 Tab_L1CLKDis[] = {0x20, 0x20, 0x10, 0x10, 0x08, 0x08, 0x04, 0x04};
206 static const u8 Tab_AM3CLKDis[] = {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00};
207 static const u8 Tab_S1CLKDis[] = {0xA2, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
208 static const u8 Tab_ManualCLKDis[]= {0x10, 0x04, 0x08, 0x20, 0x00, 0x00, 0x00, 0x00};
/* Phy compensation slew-rate tables (rise/fall, 2.0x and 1.5x drive);
 * each list is terminated by 0xFF.  Consumed by InitPhyCompensation —
 * TODO confirm exact register mapping against the BKDG. */
210 static const u8 Table_Comp_Rise_Slew_20x[] = {7, 3, 2, 2, 0xFF};
211 static const u8 Table_Comp_Rise_Slew_15x[] = {7, 7, 3, 2, 0xFF};
212 static const u8 Table_Comp_Fall_Slew_20x[] = {7, 5, 3, 2, 0xFF};
213 static const u8 Table_Comp_Fall_Slew_15x[] = {7, 7, 5, 3, 0xFF};
/*
 * Top-level DDR3 memory-controller auto-initialization.
 * Visible flow: for each node, set up PCI device handles and node state,
 * probe node presence, run the per-node DCT init chain (mct_initDCT), and
 * accumulate the system address map; then synchronize all DCTs, program the
 * HT memory map, set CPU cacheability, run DQS training, apply UMA typing,
 * program inter-DCT timings, optionally reconfigure DIMM sparing, interleave
 * nodes/channels, initialize ECC (with memory clear), and finish with
 * mct_FinalMCT_D on node 0.  Fatal per-node errors jump to fatalexit.
 */
215 static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
216 struct DCTStatStruc *pDCTstatA)
219 * Memory may be mapped contiguously all the way up to 4GB (depending on setup
220 * options). It is the responsibility of PCI subsystem to create an uncacheable
221 * IO region below 4GB and to adjust TOP_MEM downward prior to any IO mapping or
222 * accesses. It is the same responsibility of the CPU sub-system prior to
225 * Slot Number is an external convention, and is determined by OEM with accompanying
226 * silk screening. OEM may choose to use Slot number convention which is consistent
227 * with DIMM number conventions. All AMD engineering platforms do.
229 * Build Requirements:
230 * 1. MCT_SEG0_START and MCT_SEG0_END macros to begin and end the code segment,
231 * defined in mcti.inc.
233 * Run-Time Requirements:
234 * 1. Complete Hypertransport Bus Configuration
235 * 2. SMBus Controller Initialized
236 * 1. BSP in Big Real Mode
237 * 2. Stack at SS:SP, located somewhere between A000:0000 and F000:FFFF
238 * 3. Checksummed or Valid NVRAM bits
239 * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
240 * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to entry
241 * 6. All var MTRRs reset to zero
242 * 7. State of NB_CFG.DisDatMsk set properly on all CPUs
243 * 8. All CPUs at 2Ghz Speed (unless DQS training is not installed).
244 * 9. All cHT links at max Speed/Width (unless DQS training is not installed).
247 * Global relationship between index values and item values:
249 * pDCTstat.CASL pDCTstat.Speed
251 * --------------------------
265 mctInitMemGPIOs_A_D(); /* Set any required GPIOs*/
/* Per-node setup and DRAM init. */
268 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
269 struct DCTStatStruc *pDCTstat;
270 pDCTstat = pDCTstatA + Node;
271 pDCTstat->Node_ID = Node;
272 pDCTstat->dev_host = PA_HOST(Node);
273 pDCTstat->dev_map = PA_MAP(Node);
274 pDCTstat->dev_dct = PA_DCT(Node);
275 pDCTstat->dev_nbmisc = PA_NBMISC(Node);
276 pDCTstat->NodeSysBase = node_sys_base;
278 mct_init(pMCTstat, pDCTstat);
279 mctNodeIDDebugPort_D();
280 pDCTstat->NodePresent = NodePresent_D(Node);
281 if (pDCTstat->NodePresent) { /* See if Node is there*/
282 clear_legacy_Mode(pMCTstat, pDCTstat);
283 pDCTstat->LogicalCPUID = mctGetLogicalCPUID_D(Node);
285 mct_InitialMCT_D(pMCTstat, pDCTstat);
287 mctSMBhub_Init(Node); /* Switch SMBUS crossbar to proper node*/
289 mct_initDCT(pMCTstat, pDCTstat);
290 if (pDCTstat->ErrCode == SC_FatalErr) {
291 goto fatalexit; /* any fatal errors?*/
292 } else if (pDCTstat->ErrCode < SC_StopError) {
295 } /* if Node present */
/* Advance running system base past this node's limit (16-byte aligned). */
296 node_sys_base = pDCTstat->NodeSysBase;
297 node_sys_base += (pDCTstat->NodeSysLimit + 2) & ~0x0F;
299 if (NodesWmem == 0) {
300 printk(BIOS_DEBUG, "No Nodes?!\n");
304 printk(BIOS_DEBUG, "mctAutoInitMCT_D: SyncDCTsReady_D\n");
305 SyncDCTsReady_D(pMCTstat, pDCTstatA); /* Make sure DCTs are ready for accesses.*/
307 printk(BIOS_DEBUG, "mctAutoInitMCT_D: HTMemMapInit_D\n");
308 HTMemMapInit_D(pMCTstat, pDCTstatA); /* Map local memory into system address space.*/
311 printk(BIOS_DEBUG, "mctAutoInitMCT_D: CPUMemTyping_D\n");
312 CPUMemTyping_D(pMCTstat, pDCTstatA); /* Map dram into WB/UC CPU cacheability */
313 mctHookAfterCPU(); /* Setup external northbridge(s) */
315 printk(BIOS_DEBUG, "mctAutoInitMCT_D: DQSTiming_D\n");
316 DQSTiming_D(pMCTstat, pDCTstatA); /* Get Receiver Enable and DQS signal timing*/
318 printk(BIOS_DEBUG, "mctAutoInitMCT_D: UMAMemTyping_D\n");
319 UMAMemTyping_D(pMCTstat, pDCTstatA); /* Fix up for UMA sizing */
321 printk(BIOS_DEBUG, "mctAutoInitMCT_D: :OtherTiming\n");
322 mct_OtherTiming(pMCTstat, pDCTstatA);
324 if (ReconfigureDIMMspare_D(pMCTstat, pDCTstatA)) { /* RESET# if 1st pass of DIMM spare enabled*/
328 InterleaveNodes_D(pMCTstat, pDCTstatA);
329 InterleaveChannels_D(pMCTstat, pDCTstatA);
331 printk(BIOS_DEBUG, "mctAutoInitMCT_D: ECCInit_D\n");
332 if (ECCInit_D(pMCTstat, pDCTstatA)) { /* Setup ECC control and ECC check-bits*/
333 printk(BIOS_DEBUG, "mctAutoInitMCT_D: MCTMemClr_D\n");
334 MCTMemClr_D(pMCTstat,pDCTstatA);
337 mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) ); /* Node 0 */
338 printk(BIOS_DEBUG, "All Done\n");
/* fatalexit: unrecoverable per-node error — halt firmware. */
342 die("mct_d: fatalexit");
/*
 * Handle DIMM-spare reconfiguration after first-pass init when the
 * NV_CS_SpareCTL NVRAM option is enabled.  In the no-warm-reset flavor
 * (MCT_DIMM_SPARE_NO_WARM) it either reloads the saved DQS timing registers
 * (second pass, GSB_EnDIMMSpareNW already set) or resets the data structures
 * and marks GSB_EnDIMMSpareNW for a software restart of init; otherwise it
 * takes the warm-reset path.  Returns nonzero when the caller must restart
 * (RESET#) — TODO confirm return contract, the tail of this routine is not
 * visible here.
 */
345 static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
346 struct DCTStatStruc *pDCTstatA)
350 if (mctGet_NVbits(NV_CS_SpareCTL)) {
351 if (MCT_DIMM_SPARE_NO_WARM) {
352 /* Do no warm-reset DIMM spare */
353 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
354 LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);
357 mct_ResetDataStruct_D(pMCTstat, pDCTstatA);
358 pMCTstat->GStatus |= 1 << GSB_EnDIMMSpareNW;
362 /* Do warm-reset DIMM spare */
363 if (mctGet_NVbits(NV_DQSTrainCTL))
/*
 * Perform DQS signal training (or restore previously saved timings).
 * When NV_DQSTrainCTL is set: run fence training, write levelization,
 * receiver-enable training, DQS position training, ECC receiver-enable
 * setup, then save results and clear memory.  Otherwise restore saved DQS
 * timings into the registers and clear memory.  Skipped entirely when the
 * DIMM-spare no-warm-reset pass is active (GSB_EnDIMMSpareNW).
 */
374 static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
375 struct DCTStatStruc *pDCTstatA)
379 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
383 nv_DQSTrainCTL = mctGet_NVbits(NV_DQSTrainCTL);
384 /* FIXME: BOZO- DQS training every time*/
387 mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);
388 phyAssistedMemFnceTraining(pMCTstat, pDCTstatA);
390 if (nv_DQSTrainCTL) {
391 mctHookBeforeAnyTraining(pMCTstat, pDCTstatA);
392 /* TODO: should be in mctHookBeforeAnyTraining */
/* MSRs 0x26C-0x26F are the fixed-range MTRRs covering E0000h-FFFFFh;
 * type 0x04 per byte — presumably write-through for training, TODO confirm. */
393 _WRMSR(0x26C, 0x04040404, 0x04040404);
394 _WRMSR(0x26D, 0x04040404, 0x04040404);
395 _WRMSR(0x26E, 0x04040404, 0x04040404);
396 _WRMSR(0x26F, 0x04040404, 0x04040404);
397 mct_WriteLevelization_HW(pMCTstat, pDCTstatA);
399 TrainReceiverEn_D(pMCTstat, pDCTstatA, FirstPass);
401 mct_TrainDQSPos_D(pMCTstat, pDCTstatA);
403 /* Second Pass never used for Barcelona! */
404 /* TrainReceiverEn_D(pMCTstat, pDCTstatA, SecondPass); */
406 mctSetEccDQSRcvrEn_D(pMCTstat, pDCTstatA);
408 /* FIXME - currently uses calculated value TrainMaxReadLatency_D(pMCTstat, pDCTstatA); */
409 mctHookAfterAnyTraining();
410 mctSaveDQSSigTmg_D();
412 MCTMemClr_D(pMCTstat, pDCTstatA);
414 mctGetDQSSigTmg_D(); /* get values into data structure */
415 LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA); /* load values into registers.*/
416 /* mctDoWarmResetMemClr_D(); */
417 MCTMemClr_D(pMCTstat, pDCTstatA);
/*
 * Load previously captured DQS signal-timing values from the data structure
 * into the DCT registers of every node with mapped memory: receiver-enable
 * delays per receiver pair, write-levelization TxDqs per byte lane, ECC
 * receiver enables, per-DIMM read/write DQS timing words, and finally the
 * per-channel MaxRdLatency field (also clearing DqsRcvEnTrain mode).
 */
421 static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
422 struct DCTStatStruc *pDCTstatA)
424 u8 Node, Receiver, Channel, Dir, DIMM;
433 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
434 struct DCTStatStruc *pDCTstat;
435 pDCTstat = pDCTstatA + Node;
/* Only touch nodes that actually have memory mapped. */
437 if (pDCTstat->DCTSysLimit) {
438 dev = pDCTstat->dev_dct;
439 for (Channel = 0;Channel < 2; Channel++) {
440 /* there are four receiver pairs,
441 loosely associated with chipselects.*/
442 index_reg = 0x98 + Channel * 0x100;
443 for (Receiver = 0; Receiver < 8; Receiver += 2) {
444 /* Set Receiver Enable Values */
445 mct_SetRcvrEnDly_D(pDCTstat,
447 1, /* FinalValue, From stack */
451 (Receiver >> 1) * 3 + 0x10, /* Addl_Index */
452 2); /* Pass Second Pass ? */
453 /* Restore Write levelization training data */
454 for (ByteLane = 0; ByteLane < 9; ByteLane ++) {
455 txdqs = pDCTstat->CH_D_B_TxDqs[Channel][Receiver >> 1][ByteLane];
456 index = Table_DQSRcvEn_Offset[ByteLane >> 1];
457 index += (Receiver >> 1) * 3 + 0x10 + 0x20; /* Addl_Index */
458 val = Get_NB32_index_wait(dev, 0x98 + 0x100*Channel, index);
459 if (ByteLane & 1) { /* odd byte lane */
460 val &= ~(0xFF << 16);
466 Set_NB32_index_wait(dev, 0x98 + 0x100*Channel, index, val);
470 for (Channel = 0; Channel<2; Channel++) {
471 SetEccDQSRcvrEn_D(pDCTstat, Channel);
474 for (Channel = 0; Channel < 2; Channel++) {
476 index_reg = 0x98 + Channel * 0x100;
479 * when 400, 533, 667, it will support dimm0/1/2/3,
480 * and set conf for dimm0, hw will copy to dimm1/2/3
481 * set for dimm1, hw will copy to dimm3
482 * Rev A/B only support DIMM0/1 when 800Mhz and above
483 * + 0x100 to next dimm
484 * Rev C support DIMM0/1/2/3 when 800Mhz and above
485 * + 0x100 to next dimm
487 for (DIMM = 0; DIMM < 4; DIMM++) {
489 index = 0; /* CHA Write Data Timing Low */
/* Speed >= 4 (800 MHz and up, per comment above): each DIMM has its own
 * 0x100-aligned block of timing registers. */
491 if (pDCTstat->Speed >= 4) {
492 index = 0x100 * DIMM;
497 for (Dir = 0; Dir < 2; Dir++) {/* RD/WR */
498 p = pDCTstat->CH_D_DIR_B_DQS[Channel][DIMM][Dir];
499 val = stream_to_int(p); /* CHA Read Data Timing High */
500 Set_NB32_index_wait(dev, index_reg, index+1, val);
501 val = stream_to_int(p+4); /* CHA Write Data Timing High */
502 Set_NB32_index_wait(dev, index_reg, index+2, val);
503 val = *(p+8); /* CHA Write ECC Timing */
504 Set_NB32_index_wait(dev, index_reg, index+3, val);
510 for (Channel = 0; Channel<2; Channel++) {
511 reg = 0x78 + Channel * 0x100;
512 val = Get_NB32(dev, reg);
514 val |= ((u32) pDCTstat->CH_MaxRdLat[Channel] << 22);
515 val &= ~(1<<DqsRcvEnTrain);
516 Set_NB32(dev, reg, val); /* program MaxRdLatency to correspond with current delay*/
/*
 * Program the HyperTransport DRAM address map (F1x40..F1x7C Dram Base/Limit
 * pairs) for every node, handling the memory hole below the bottom of the
 * IO region: either a hardware hole (Dram Hole Address Register, F1xF0,
 * with DramHoleValid) when a node's range straddles BottomIO, or a software
 * node hole when a node starts exactly at BottomIO.  Afterwards, node 0's
 * map is copied to all other present nodes and to the extended map
 * registers (F1x120/124) via mct_HTMemMapExt.
 */
522 static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
523 struct DCTStatStruc *pDCTstatA)
526 u32 NextBase, BottomIO;
527 u8 _MemHoleRemap, DramHoleBase, DramHoleOffset;
528 u32 HoleSize, DramSelBaseAddr;
534 struct DCTStatStruc *pDCTstat;
536 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
/* Hole base comes from NVRAM (NV_BottomIO) on first pass, else from the
 * already-established pMCTstat->HoleBase.  Units are addr[31:24] >> via
 * the (24-8) shifts: values are in 16MB-granular [39:8] form. */
538 if (pMCTstat->HoleBase == 0) {
539 DramHoleBase = mctGet_NVbits(NV_BottomIO);
541 DramHoleBase = pMCTstat->HoleBase >> (24-8);
544 BottomIO = DramHoleBase << (24-8);
547 pDCTstat = pDCTstatA + 0;
548 dev = pDCTstat->dev_map;
550 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
551 pDCTstat = pDCTstatA + Node;
552 devx = pDCTstat->dev_map;
554 pDCTstat = pDCTstatA + Node; /* ??? */
555 if (!pDCTstat->GangedMode) {
556 DramSelBaseAddr = pDCTstat->NodeSysLimit - pDCTstat->DCTSysLimit;
557 /*In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
558 val = pDCTstat->NodeSysLimit;
559 if ((val & 0xFF) == 0xFE) {
563 pDCTstat->DCTSysLimit = val;
566 base = pDCTstat->DCTSysBase;
567 limit = pDCTstat->DCTSysLimit;
571 DramSelBaseAddr += NextBase;
572 printk(BIOS_DEBUG, " Node: %02x base: %02x limit: %02x BottomIO: %02x\n", Node, base, limit, BottomIO);
/* Node range straddles the IO hole: program a hardware hole. */
575 if ((base < BottomIO) && (limit >= BottomIO)) {
577 pDCTstat->Status |= 1 << SB_HWHole;
578 pMCTstat->GStatus |= 1 << GSB_HWHole;
579 pDCTstat->DCTSysBase = base;
580 pDCTstat->DCTSysLimit = limit;
581 pDCTstat->DCTHoleBase = BottomIO;
582 pMCTstat->HoleBase = BottomIO;
583 HoleSize = _4GB_RJ8 - BottomIO; /* HoleSize[39:8] */
584 if ((DramSelBaseAddr > 0) && (DramSelBaseAddr < BottomIO))
585 base = DramSelBaseAddr;
586 val = ((base + HoleSize) >> (24-8)) & 0xFF;
587 DramHoleOffset = val;
588 val <<= 8; /* shl 16, rol 24 */
589 val |= DramHoleBase << 24;
590 val |= 1 << DramHoleValid;
591 Set_NB32(devx, 0xF0, val); /* Dram Hole Address Reg */
592 pDCTstat->DCTSysLimit += HoleSize;
593 base = pDCTstat->DCTSysBase;
594 limit = pDCTstat->DCTSysLimit;
595 } else if (base == BottomIO) {
/* Node starts exactly at the hole: software (remap-less) hole. */
597 pMCTstat->GStatus |= 1<<GSB_SpIntRemapHole;
598 pDCTstat->Status |= 1<<SB_SWNodeHole;
599 pMCTstat->GStatus |= 1<<GSB_SoftHole;
600 pMCTstat->HoleBase = base;
604 pDCTstat->DCTSysBase = base;
605 pDCTstat->DCTSysLimit = limit;
607 /* No Remapping. Normal Contiguous mapping */
608 pDCTstat->DCTSysBase = base;
609 pDCTstat->DCTSysLimit = limit;
612 /*No Remapping. Normal Contiguous mapping*/
613 pDCTstat->DCTSysBase = base;
614 pDCTstat->DCTSysLimit = limit;
616 base |= 3; /* set WE,RE fields*/
617 pMCTstat->SysLimit = limit;
619 Set_NB32(dev, 0x40 + (Node << 3), base); /* [Node] + Dram Base 0 */
621 val = limit & 0xFFFF0000;
623 Set_NB32(dev, 0x44 + (Node << 3), val); /* set DstNode */
625 printk(BIOS_DEBUG, " Node: %02x base: %02x limit: %02x \n", Node, base, limit);
626 limit = pDCTstat->DCTSysLimit;
628 NextBase = (limit & 0xFFFF0000) + 0x10000;
632 /* Copy dram map from Node 0 to Node 1-7 */
633 for (Node = 1; Node < MAX_NODES_SUPPORTED; Node++) {
635 pDCTstat = pDCTstatA + Node;
636 devx = pDCTstat->dev_map;
638 if (pDCTstat->NodePresent) {
639 reg = 0x40; /*Dram Base 0*/
641 val = Get_NB32(dev, reg);
642 Set_NB32(devx, reg, val);
644 } while ( reg < 0x80);
646 break; /* stop at first absent Node */
650 /*Copy dram map to F1x120/124*/
651 mct_HTMemMapExt(pMCTstat, pDCTstatA);
/*
 * Kick off the hardware memory-clear on every present node (in parallel),
 * then wait for each to complete.  Skipped (left to a warm-reset callback)
 * when DQS training is disabled in NVRAM.
 */
654 static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
655 struct DCTStatStruc *pDCTstatA)
658 /* Initiates a memory clear operation for all node. The mem clr
659 * is done in parallel. After the memclr is complete, all processors
660 * status are checked to ensure that memclr has completed.
663 struct DCTStatStruc *pDCTstat;
665 if (!mctGet_NVbits(NV_DQSTrainCTL)){
666 /* FIXME: callback to wrapper: mctDoWarmResetMemClr_D */
667 } else { /* NV_DQSTrainCTL == 1 */
/* First pass: start the clear on every present node. */
668 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
669 pDCTstat = pDCTstatA + Node;
671 if (pDCTstat->NodePresent) {
672 DCTMemClr_Init_D(pMCTstat, pDCTstat);
/* Second pass: wait for each node's clear to finish. */
675 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
676 pDCTstat = pDCTstatA + Node;
678 if (pDCTstat->NodePresent) {
679 DCTMemClr_Sync_D(pMCTstat, pDCTstat);
/*
 * Start the hardware memory clear on one node: wait until MemClrBusy is
 * deasserted, then set MemClrInit.  No-op for nodes without mapped memory
 * (DCTSysLimit == 0).
 */
685 static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
686 struct DCTStatStruc *pDCTstat)
692 /* Initiates a memory clear operation on one node */
693 if (pDCTstat->DCTSysLimit) {
694 dev = pDCTstat->dev_dct;
698 val = Get_NB32(dev, reg);
699 } while (val & (1 << MemClrBusy));
701 val |= (1 << MemClrInit);
702 Set_NB32(dev, reg, val);
/*
 * Wait for the memory clear to complete on every present node.  As in
 * MCTMemClr_D, this is skipped when DQS training is disabled (warm-reset
 * callback handles it instead).
 */
706 static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
707 struct DCTStatStruc *pDCTstatA)
709 /* Ensures that memory clear has completed on all node.*/
711 struct DCTStatStruc *pDCTstat;
713 if (!mctGet_NVbits(NV_DQSTrainCTL)){
714 /* callback to wrapper: mctDoWarmResetMemClr_D */
715 } else { /* NV_DQSTrainCTL == 1 */
716 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
717 pDCTstat = pDCTstatA + Node;
719 if (pDCTstat->NodePresent) {
720 DCTMemClr_Sync_D(pMCTstat, pDCTstat);
/*
 * Block until the hardware memory clear on one node has finished: poll
 * until MemClrBusy clears and Dr_MemClrStatus is set, then program the
 * BKDG-recommended value (plus FlushWrOnStpGnt for S3) into F2x11C.
 */
726 static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
727 struct DCTStatStruc *pDCTstat)
730 u32 dev = pDCTstat->dev_dct;
733 /* Ensure that a memory clear operation has completed on one node */
734 if (pDCTstat->DCTSysLimit){
738 val = Get_NB32(dev, reg);
739 } while (val & (1 << MemClrBusy));
742 val = Get_NB32(dev, reg);
743 } while (!(val & (1 << Dr_MemClrStatus)));
746 val = 0x0FE40FC0; /* BKDG recommended */
747 val |= MCCH_FlushWrOnStpGnt; /* Set for S3 */
748 Set_NB32(dev, 0x11C, val);
/*
 * Probe whether CPU node 'Node' exists: read the vendor/device ID at the
 * node's host bridge and compare it with the expected Hammer-family ID from
 * mct_NodePresent_D(); an OEM hook (oemNodePresent_D) may override the
 * result.  The Node ID register (F0x60) is then cross-checked against the
 * requested node number.  Returns nonzero when present — TODO confirm the
 * exact return values; the tail of this routine is not visible here.
 */
751 static u8 NodePresent_D(u8 Node)
754 * Determine if a single Hammer Node exists within the network.
761 dev = PA_HOST(Node); /*test device/vendor id at host bridge */
762 val = Get_NB32(dev, 0);
763 dword = mct_NodePresent_D(); /* FIXME: BOZO -11001022h rev for F */
764 if (val == dword) { /* AMD Hammer Family CPU HT Configuration */
765 if (oemNodePresent_D(Node, &ret))
767 /* Node ID register */
768 val = Get_NB32(dev, 0x60);
771 if (val == dword) /* current nodeID = requested nodeID ? */
/*
 * Initialize one DRAM controller (DCT) on a node: enable DDR3 mode in
 * F2x94, then run the init chain — DIMM presence, SPD width calc, cycle
 * timing, auto config, platform-specific setup — stopping at the first
 * stage that reports >= SC_StopError.  On success (and when not in the
 * DIMM-spare no-warm-reset pass) the DCT state machine is started via
 * StartupDCT_D.  On the failure path the DRAM interface is disabled
 * (DisDramInterface) and all MemClkDis bits are set to save power.
 */
778 static void DCTInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct)
781 * Initialize DRAM on single Athlon 64/Opteron Node.
786 ClearDCT_D(pMCTstat, pDCTstat, dct);
787 stopDCTflag = 1; /*preload flag with 'disable' */
788 /* enable DDR3 support */
789 val = Get_NB32(pDCTstat->dev_dct, 0x94 + dct * 0x100);
790 val |= 1 << Ddr3Mode;
791 Set_NB32(pDCTstat->dev_dct, 0x94 + dct * 0x100, val);
792 if (mct_DIMMPresence(pMCTstat, pDCTstat, dct) < SC_StopError) {
793 printk(BIOS_DEBUG, "\t\tDCTInit_D: mct_DIMMPresence Done\n");
794 if (mct_SPDCalcWidth(pMCTstat, pDCTstat, dct) < SC_StopError) {
795 printk(BIOS_DEBUG, "\t\tDCTInit_D: mct_SPDCalcWidth Done\n");
796 if (AutoCycTiming_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
797 printk(BIOS_DEBUG, "\t\tDCTInit_D: AutoCycTiming_D Done\n");
798 if (AutoConfig_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
799 printk(BIOS_DEBUG, "\t\tDCTInit_D: AutoConfig_D Done\n");
800 if (PlatformSpec_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
801 printk(BIOS_DEBUG, "\t\tDCTInit_D: PlatformSpec_D Done\n");
803 if (!(pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW))) {
804 printk(BIOS_DEBUG, "\t\tDCTInit_D: StartupDCT_D\n");
805 StartupDCT_D(pMCTstat, pDCTstat, dct); /*yeaahhh! */
/* Failure path: disable this DRAM interface and its memory clocks. */
814 u32 reg_off = dct * 0x100;
815 val = 1<<DisDramInterface;
816 Set_NB32(pDCTstat->dev_dct, reg_off+0x94, val);
817 /*To maximize power savings when DisDramInterface=1b,
818 all of the MemClkDis bits should also be set.*/
820 Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
822 /* mct_EnDllShutdownSR */
/*
 * Wait for all DCTs on all nodes to report ready (mct_SyncDCTsReady),
 * then re-enable the phy compensation engine (clear DisAutoComp in the
 * additional-data register 0x98/index 8) on every node that has valid
 * DIMMs on either DCT.
 */
826 static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
827 struct DCTStatStruc *pDCTstatA)
829 /* Wait (and block further access to dram) for all DCTs to be ready,
830 * by polling all InitDram bits and waiting for possible memory clear
831 * operations to be complete. Read MemClkFreqVal bit to see if
832 * the DIMMs are present in this node.
837 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
838 struct DCTStatStruc *pDCTstat;
839 pDCTstat = pDCTstatA + Node;
840 mct_SyncDCTsReady(pDCTstat);
843 /* re-enable phy compensation engine when dram init is completed on all nodes. */
844 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
845 struct DCTStatStruc *pDCTstat;
846 pDCTstat = pDCTstatA + Node;
847 if (pDCTstat->NodePresent) {
848 if (pDCTstat->DIMMValidDCT[0] > 0 || pDCTstat->DIMMValidDCT[1] > 0) {
849 /* re-enable phy compensation engine when dram init on both DCTs is completed. */
850 val = Get_NB32_index_wait(pDCTstat->dev_dct, 0x98, 0x8);
851 val &= ~(1 << DisAutoComp);
852 Set_NB32_index_wait(pDCTstat->dev_dct, 0x98, 0x8, val);
856 /* wait 750us before any memory access can be made. */
/*
 * Start one DCT's DRAM init state machine: if MemClkFreqVal indicates
 * DIMMs are present on this DCT, invoke the pre-init hook, run
 * mct_DramInit (unless in the DIMM-spare no-warm-reset pass), then the
 * post-init fixups and hook.  Does not wait for completion — see the
 * original comment below; synchronization happens in SyncDCTsReady_D.
 */
860 static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
861 struct DCTStatStruc *pDCTstat, u8 dct)
863 /* Read MemClkFreqVal bit to see if the DIMMs are present in this node.
864 * If the DIMMs are present then set the DRAM Enable bit for this node.
866 * Setting dram init starts up the DCT state machine, initializes the
867 * dram devices with MRS commands, and kicks off any
868 * HW memory clear process that the chip is capable of. The sooner
869 * that dram init is set for all nodes, the faster the memory system
870 * initialization can complete. Thus, the init loop is unrolled into
871 * two loops so as to start the processes for non BSP nodes sooner.
872 * This procedure will not wait for the process to finish.
873 * Synchronization is handled elsewhere.
877 u32 reg_off = dct * 0x100;
879 dev = pDCTstat->dev_dct;
880 val = Get_NB32(dev, 0x94 + reg_off);
881 if (val & (1<<MemClkFreqVal)) {
882 mctHookBeforeDramInit(); /* generalized Hook */
883 if (!(pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)))
884 mct_DramInit(pMCTstat, pDCTstat, dct);
885 AfterDramInit_D(pDCTstat, dct);
886 mctHookAfterDramInit(); /* generalized Hook*/
/*
 * Zero the DCT register block for one controller (F2x40 upward).  The end
 * of the cleared range is shortened (0x78 instead of 0xA4) during the
 * DIMM-spare no-warm-reset pass so trained values are preserved.  A final
 * write goes through dev_map — NOTE(review): the register/value for that
 * last write are set on lines not visible in this listing.
 */
890 static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
891 struct DCTStatStruc *pDCTstat, u8 dct)
894 u32 dev = pDCTstat->dev_dct;
895 u32 reg = 0x40 + 0x100 * dct;
898 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
899 reg_end = 0x78 + 0x100 * dct;
901 reg_end = 0xA4 + 0x100 * dct;
904 while(reg < reg_end) {
905 Set_NB32(dev, reg, val);
910 dev = pDCTstat->dev_map;
912 Set_NB32(dev, reg, val);
/*
 * Compute and program the secondary DRAM timings for one DCT from SPD.
 * Pass 1: walk all valid DIMMs, read the DDR3 SPD medium-time-base (MTB)
 * and the per-DIMM minimum timings (tRP, tRRD, tRCD, tRTP, tWR, tWTR,
 * tRC, tRAS, tFAW, plus tRFC-by-density), keeping worst-case values.
 * Pass 2: convert each worst-case time to bus clocks at tCK16x (x16 fixed
 * point, rounding up), clamp to the Min_*/Max_* limits, and store in
 * pDCTstat.  Pass 3: pack the values (minus their register biases) into
 * DRAM Timing Low/High (F2x88/8C), tWR into F2x84 and tFAW into F2x94,
 * with DisAutoRefresh handling for Speed > 4.
 */
915 static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
916 struct DCTStatStruc *pDCTstat, u8 dct)
920 u16 Trp, Trrd, Trcd, Tras, Trc;
923 u32 DramTimingLo, DramTimingHi;
935 /* Gather all DIMM mini-max values for cycle timing data */
944 for (i=0; i < 4; i++)
948 for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
950 if (pDCTstat->DIMMValid & (1 << i)) {
951 smbaddr = Get_DIMMAddress_D(pDCTstat, (dct + i));
953 val = mctRead_SPD(smbaddr, SPD_MTBDivisor); /* MTB=Dividend/Divisor */
954 MTB16x = ((mctRead_SPD(smbaddr, SPD_MTBDividend) & 0xFF)<<4);
955 MTB16x /= val; /* transfer to MTB*16 */
957 byte = mctRead_SPD(smbaddr, SPD_tRPmin);
962 byte = mctRead_SPD(smbaddr, SPD_tRRDmin);
967 byte = mctRead_SPD(smbaddr, SPD_tRCDmin);
972 byte = mctRead_SPD(smbaddr, SPD_tRTPmin);
977 byte = mctRead_SPD(smbaddr, SPD_tWRmin);
982 byte = mctRead_SPD(smbaddr, SPD_tWTRmin);
/* tRC/tRAS use split upper-nibble + lower-byte SPD encodings. */
987 val = mctRead_SPD(smbaddr, SPD_Upper_tRAS_tRC) & 0xFF;
990 val |= mctRead_SPD(smbaddr, SPD_tRCmin) & 0xFF;
995 byte = mctRead_SPD(smbaddr, SPD_Density) & 0xF;
996 if (Trfc[LDIMM] < byte)
999 val = mctRead_SPD(smbaddr, SPD_Upper_tRAS_tRC) & 0xF;
1001 val |= (mctRead_SPD(smbaddr, SPD_tRASmin) & 0xFF);
1006 val = mctRead_SPD(smbaddr, SPD_Upper_tFAW) & 0xF;
1008 val |= mctRead_SPD(smbaddr, SPD_tFAWmin) & 0xFF;
1012 } /* Dimm Present */
1015 /* Convert DRAM CycleTiming values and store into DCT structure */
1016 byte = pDCTstat->DIMMAutoSpeed;
1027 1. All secondary time values given in SPDs are in binary with units of ns.
1028 2. Some time values are scaled by 16, in order to have least count of 0.25 ns
1029 (more accuracy). JEDEC SPD spec. shows which ones are x1 and x4.
1030 3. Internally to this SW, cycle time, tCK16x, is scaled by 16 to match time values
1034 pDCTstat->DIMMTras = (u16)Tras;
1035 val = Tras / tCK16x;
1036 if (Tras % tCK16x) { /* round up number of busclocks */
1039 if (val < Min_TrasT)
1041 else if (val > Max_TrasT)
1043 pDCTstat->Tras = val;
1046 pDCTstat->DIMMTrp = Trp;
1048 if (Trp % tCK16x) { /* round up number of busclocks */
1053 else if (val > Max_TrpT)
1055 pDCTstat->Trp = val;
1058 pDCTstat->DIMMTrrd = Trrd;
1059 val = Trrd / tCK16x;
1060 if (Trrd % tCK16x) { /* round up number of busclocks */
1063 if (val < Min_TrrdT)
1065 else if (val > Max_TrrdT)
1067 pDCTstat->Trrd = val;
1070 pDCTstat->DIMMTrcd = Trcd;
1071 val = Trcd / tCK16x;
1072 if (Trcd % tCK16x) { /* round up number of busclocks */
1075 if (val < Min_TrcdT)
1077 else if (val > Max_TrcdT)
1079 pDCTstat->Trcd = val;
1082 pDCTstat->DIMMTrc = Trc;
1084 if (Trc % tCK16x) { /* round up number of busclocks */
1089 else if (val > Max_TrcT)
1091 pDCTstat->Trc = val;
1094 pDCTstat->DIMMTrtp = Trtp;
1095 val = Trtp / tCK16x;
1096 if (Trtp % tCK16x) {
1099 if (val < Min_TrtpT)
1101 else if (val > Max_TrtpT)
1103 pDCTstat->Trtp = val;
1106 pDCTstat->DIMMTwr = Twr;
1108 if (Twr % tCK16x) { /* round up number of busclocks */
1113 else if (val > Max_TwrT)
1115 pDCTstat->Twr = val;
1118 pDCTstat->DIMMTwtr = Twtr;
1119 val = Twtr / tCK16x;
1120 if (Twtr % tCK16x) { /* round up number of busclocks */
1123 if (val < Min_TwtrT)
1125 else if (val > Max_TwtrT)
1127 pDCTstat->Twtr = val;
1131 pDCTstat->Trfc[i] = Trfc[i];
1134 pDCTstat->DIMMTfaw = Tfaw;
1135 val = Tfaw / tCK16x;
1136 if (Tfaw % tCK16x) { /* round up number of busclocks */
1139 if (val < Min_TfawT)
1141 else if (val > Max_TfawT)
1143 pDCTstat->Tfaw = val;
1145 mctAdjustAutoCycTmg_D();
1147 /* Program DRAM Timing values */
1148 DramTimingLo = 0; /* Dram Timing Low init */
1149 val = pDCTstat->CASL - 2; /* pDCTstat.CASL to reg. definition */
1150 DramTimingLo |= val;
1152 val = pDCTstat->Trcd - Bias_TrcdT;
1153 DramTimingLo |= val<<4;
1155 val = pDCTstat->Trp - Bias_TrpT;
1156 val = mct_AdjustSPDTimings(pMCTstat, pDCTstat, val);
1157 DramTimingLo |= val<<7;
1159 val = pDCTstat->Trtp - Bias_TrtpT;
1160 DramTimingLo |= val<<10;
1162 val = pDCTstat->Tras - Bias_TrasT;
1163 DramTimingLo |= val<<12;
1165 val = pDCTstat->Trc - Bias_TrcT;
1166 DramTimingLo |= val<<16;
1168 val = pDCTstat->Trrd - Bias_TrrdT;
1169 DramTimingLo |= val<<22;
1171 DramTimingHi = 0; /* Dram Timing High init */
1172 val = pDCTstat->Twtr - Bias_TwtrT;
1173 DramTimingHi |= val<<8;
1176 DramTimingHi |= val<<16;
1183 DramTimingHi |= val << 20;
1185 dev = pDCTstat->dev_dct;
1186 reg_off = 0x100 * dct;
1188 val = pDCTstat->Twr;
1193 val = mct_AdjustSPDTimings(pMCTstat, pDCTstat, val);
1196 dword = Get_NB32(dev, 0x84 + reg_off);
1199 Set_NB32(dev, 0x84 + reg_off, dword);
1202 val = pDCTstat->Tfaw;
1203 val = mct_AdjustSPDTimings(pMCTstat, pDCTstat, val);
1207 dword = Get_NB32(dev, 0x94 + reg_off);
1208 dword &= ~0xf0000000;
1210 Set_NB32(dev, 0x94 + reg_off, dword);
1212 /* dev = pDCTstat->dev_dct; */
1213 /* reg_off = 0x100 * dct; */
1215 if (pDCTstat->Speed > 4) {
1216 val = Get_NB32(dev, 0x88 + reg_off);
1218 DramTimingLo |= val;
1220 Set_NB32(dev, 0x88 + reg_off, DramTimingLo); /*DCT Timing Low*/
1222 if (pDCTstat->Speed > 4) {
1223 DramTimingHi |= 1 << DisAutoRefresh;
1225 DramTimingHi |= 0x000018FF;
1226 Set_NB32(dev, 0x8c + reg_off, DramTimingHi); /*DCT Timing Hi*/
1228 /* dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2)); */
/* NOTE(review): this chunk appears corrupted — every line carries a stray
 * numeric prefix (original file line numbers) and interior statements are
 * missing (the embedded numbers jump). Code tokens are left byte-identical;
 * only comments were added. Restore from pristine mct_d.c before building. */
/* AutoCycTiming_D: derive DCT cycle timings from SPD + presets for one DCT.
 * Returns pDCTstat->ErrCode (SC_* status). */
1231 static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
1232 struct DCTStatStruc *pDCTstat, u8 dct)
1234 /* Initialize DCT Timing registers as per DIMM SPD.
1235 * For primary timing (T, CL) use best case T value.
1236 * For secondary timing params., use most aggressive settings
1239 * There are three components to determining "maximum frequency":
1240 * SPD component, Bus load component, and "Preset" max frequency
1243 * The SPD component is a function of the min cycle time specified
1244 * by each DIMM, and the interaction of cycle times from all DIMMs
1245 * in conjunction with CAS latency. The SPD component only applies
1246 * when user timing mode is 'Auto'.
1248 * The Bus load component is a limiting factor determined by electrical
1249 * characteristics on the bus as a result of varying number of device
1250 * loads. The Bus load component is specific to each platform but may
1251 * also be a function of other factors. The bus load component only
1252 * applies when user timing mode is 'Auto'.
1254 * The Preset component is subdivided into three items and is
1255 * the minimum of the set: Silicon revision, user limit
1256 * setting when user timing mode is 'Auto' and memclock mode
1257 * is 'Limit', OEM build specification of the maximum
1258 * frequency. The Preset component is only applies when user
1259 * timing mode is 'Auto'.
1262 /* Get primary timing (CAS Latency and Cycle Time) */
/* Speed == 0 means the target memclock has not been chosen yet. */
1263 if (pDCTstat->Speed == 0) {
1264 mctGet_MaxLoadFreq(pDCTstat);
1266 /* and Factor in presets (setup options, Si cap, etc.) */
1267 GetPresetmaxF_D(pMCTstat, pDCTstat);
1269 /* Go get best T and CL as specified by DIMM mfgs. and OEM */
1270 SPDGetTCL_D(pMCTstat, pDCTstat, dct);
1271 /* skip callback mctForce800to1067_D */
1272 pDCTstat->Speed = pDCTstat->DIMMAutoSpeed;
1273 pDCTstat->CASL = pDCTstat->DIMMCASL;
1276 mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
/* Secondary timing parameters (Tras, Trp, Trcd, ...) from SPD. */
1278 SPD2ndTiming(pMCTstat, pDCTstat, dct);
1280 printk(BIOS_DEBUG, "AutoCycTiming: Status %x\n", pDCTstat->Status);
1281 printk(BIOS_DEBUG, "AutoCycTiming: ErrStatus %x\n", pDCTstat->ErrStatus);
1282 printk(BIOS_DEBUG, "AutoCycTiming: ErrCode %x\n", pDCTstat->ErrCode);
1283 printk(BIOS_DEBUG, "AutoCycTiming: Done\n\n");
1285 mctHookAfterAutoCycTmg();
1287 return pDCTstat->ErrCode;
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (e.g. local declarations, closing braces). Comments only
 * added; code tokens byte-identical. */
/* GetPresetmaxF_D: compute the least of Si-revision, user-limit, and
 * platform max memclock and store it in pDCTstat->PresetmaxFreq (MHz). */
1290 static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
1291 struct DCTStatStruc *pDCTstat)
1293 /* Get max frequency from OEM platform definition, from any user
1294 * override (limiting) of max frequency, and from any Si Revision
1295 * Specific information. Return the least of these three in
1296 * DCTStatStruc.PresetmaxFreq.
1298 /* TODO: Set the proper max frequency in wrappers/mcti_d.c. */
1302 /* Get CPU Si Revision defined limit (NPT) */
1303 proposedFreq = 533; /* Rev F0 programmable max memclock is */
1305 /*Get User defined limit if "limit" mode */
1306 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
1307 word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
1308 if (word < proposedFreq)
1309 proposedFreq = word;
1311 /* Get Platform defined limit */
1312 word = mctGet_NVbits(NV_MAX_MEMCLK);
1313 if (word < proposedFreq)
1314 proposedFreq = word;
/* Clamp the previously stored preset down to the proposed limit. */
1316 word = pDCTstat->PresetmaxFreq;
1317 if (word > proposedFreq)
1318 word = proposedFreq;
1320 pDCTstat->PresetmaxFreq = word;
1322 /* Check F3xE8[DdrMaxRate] for maximum DRAM data rate support */
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (loop headers, else-branches, closing braces). Comments only
 * added; code tokens byte-identical. */
/* SPDGetTCL_D: pick the best common (tCK, CAS latency) pair for all DIMMs on
 * the channel per the JEDEC DDR3 SPD algorithm (steps 1-6 below); results go
 * to pDCTstat->DIMMAutoSpeed / DIMMCASL. Time values suffixed 16x are scaled
 * by 16 (0.0625 ns least count). */
1325 static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
1326 struct DCTStatStruc *pDCTstat, u8 dct)
1328 /* Find the best T and CL primary timing parameter pair, per Mfg.,
1329 * for the given set of DIMMs, and store into DCTStatStruc
1330 * (.DIMMAutoSpeed and .DIMMCASL). See "Global relationship between
1331 * index values and item values" for definition of CAS latency
1332 * index (j) and Frequency index (k).
1334 u8 i, CASLatLow, CASLatHigh;
1339 u8 CLactual, CLdesired, CLT_Fail;
1341 u8 smbaddr, byte, bytex;
1349 for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
1350 if (pDCTstat->DIMMValid & (1 << i)) {
1351 smbaddr = Get_DIMMAddress_D(pDCTstat, (dct + i));
1352 /* Step 1: Determine the common set of supported CAS Latency
1353 * values for all modules on the memory channel using the CAS
1354 * Latencies Supported in SPD bytes 14 and 15.
1356 byte = mctRead_SPD(smbaddr, SPD_CASLow);
1358 byte = mctRead_SPD(smbaddr, SPD_CASHigh);
1360 /* Step 2: Determine tAAmin(all) which is the largest tAAmin
1361 value for all modules on the memory channel (SPD byte 16). */
/* MTB16x = medium timebase (dividend/divisor), scaled by 16. */
1362 byte = mctRead_SPD(smbaddr, SPD_MTBDivisor);
1364 MTB16x = ((mctRead_SPD(smbaddr, SPD_MTBDividend) & 0xFF)<<4);
1365 MTB16x /= byte; /* transfer to MTB*16 */
1367 byte = mctRead_SPD(smbaddr, SPD_tAAmin);
1368 if (tAAmin16x < byte * MTB16x)
1369 tAAmin16x = byte * MTB16x;
1370 /* Step 3: Determine tCKmin(all) which is the largest tCKmin
1371 value for all modules on the memory channel (SPD byte 12). */
1372 byte = mctRead_SPD(smbaddr, SPD_tCKmin);
1374 if (tCKmin16x < byte * MTB16x)
1375 tCKmin16x = byte * MTB16x;
1378 /* calculate tCKproposed16x */
1379 tCKproposed16x = 16000 / pDCTstat->PresetmaxFreq;
1380 if (tCKmin16x > tCKproposed16x)
1381 tCKproposed16x = tCKmin16x;
1383 /* mctHookTwo1333DimmOverride(); */
1384 /* For UDIMM, if there are two DDR3-1333 on the same channel,
1385 downgrade DDR speed to 1066. */
1387 /* TODO: get user manual tCK16x(Freq.) and overwrite current tCKproposed16x if manual. */
/* Snap tCK to a standard JEDEC grade and record the frequency index:
 * 20 -> 1.25ns (DDR3-1600), 24 -> 1.5ns (1333), 30 -> 1.875ns (1066),
 * 40 -> 2.5ns (800). */
1388 if (tCKproposed16x == 20)
1389 pDCTstat->TargetFreq = 7;
1390 else if (tCKproposed16x <= 24) {
1391 pDCTstat->TargetFreq = 6;
1392 tCKproposed16x = 24;
1394 else if (tCKproposed16x <= 30) {
1395 pDCTstat->TargetFreq = 5;
1396 tCKproposed16x = 30;
1399 pDCTstat->TargetFreq = 4;
1400 tCKproposed16x = 40;
1402 /* Running through this loop twice:
1403 - First time find tCL at target frequency
1404 - Second tim find tCL at 400MHz */
1408 /* Step 4: For a proposed tCK value (tCKproposed) between tCKmin(all) and tCKmax,
1409 determine the desired CAS Latency. If tCKproposed is not a standard JEDEC
1410 value (2.5, 1.875, 1.5, or 1.25 ns) then tCKproposed must be adjusted to the
1411 next lower standard tCK value for calculating CLdesired.
1412 CLdesired = ceiling ( tAAmin(all) / tCKproposed )
1413 where tAAmin is defined in Byte 16. The ceiling function requires that the
1414 quotient be rounded up always. */
1415 CLdesired = tAAmin16x / tCKproposed16x;
1416 if (tAAmin16x % tCKproposed16x)
1418 /* Step 5: Chose an actual CAS Latency (CLactual) that is greather than or equal
1419 to CLdesired and is supported by all modules on the memory channel as
1420 determined in step 1. If no such value exists, choose a higher tCKproposed
1421 value and repeat steps 4 and 5 until a solution is found. */
/* Bit i of the merged CAS mask corresponds to CL = i + 4. */
1422 for (i = 0, CLactual = 4; i < 15; i++, CLactual++) {
1423 if ((CASLatHigh << 8 | CASLatLow) & (1 << i)) {
1424 if (CLdesired <= CLactual)
1430 /* Step 6: Once the calculation of CLactual is completed, the BIOS must also
1431 verify that this CAS Latency value does not exceed tAAmax, which is 20 ns
1432 for all DDR3 speed grades, by multiplying CLactual times tCKproposed. If
1433 not, choose a lower CL value and repeat steps 5 and 6 until a solution is found. */
/* 320 = 20 ns scaled by 16. */
1434 if (CLactual * tCKproposed16x > 320)
1438 bytex = CLactual - 2;
1439 if (tCKproposed16x == 20)
1441 else if (tCKproposed16x == 24)
1443 else if (tCKproposed16x == 30)
1448 /* mctHookManualCLOverride */
1452 if (tCKproposed16x != 40) {
1453 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
1454 pDCTstat->DIMMAutoSpeed = byte;
1455 pDCTstat->DIMMCASL = bytex;
1458 pDCTstat->TargetCASL = bytex;
/* Force a second pass at 400 MHz (tCK = 2.5 ns). */
1459 tCKproposed16x = 40;
1462 pDCTstat->DIMMAutoSpeed = byte;
1463 pDCTstat->DIMMCASL = bytex;
1468 printk(BIOS_DEBUG, "SPDGetTCL_D: DIMMCASL %x\n", pDCTstat->DIMMCASL);
1469 printk(BIOS_DEBUG, "SPDGetTCL_D: DIMMAutoSpeed %x\n", pDCTstat->DIMMAutoSpeed);
1471 printk(BIOS_DEBUG, "SPDGetTCL_D: Status %x\n", pDCTstat->Status);
1472 printk(BIOS_DEBUG, "SPDGetTCL_D: ErrStatus %x\n", pDCTstat->ErrStatus);
1473 printk(BIOS_DEBUG, "SPDGetTCL_D: ErrCode %x\n", pDCTstat->ErrCode);
1474 printk(BIOS_DEBUG, "SPDGetTCL_D: Done\n\n");
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (local declarations, closing braces). Comments only added;
 * code tokens byte-identical. */
/* PlatformSpec_D: apply platform-specific DRAM config (bus drive/ODT via
 * mctGet_PS_Cfg_D, 2T command mode, PHY compensation). Returns ErrCode. */
1477 static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
1478 struct DCTStatStruc *pDCTstat, u8 dct)
1484 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, dct);
/* In ganged mode DCT1 mirrors DCT0, so configure DCT1 as well. */
1486 if (pDCTstat->GangedMode == 1) {
1487 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1);
1490 if ( pDCTstat->_2Tmode == 2) {
1491 dev = pDCTstat->dev_dct;
1492 reg = 0x94 + 0x100 * dct; /* Dram Configuration Hi */
1493 val = Get_NB32(dev, reg);
1494 val |= 1 << 20; /* 2T CMD mode */
1495 Set_NB32(dev, reg, val);
1498 mct_PlatformSpec(pMCTstat, pDCTstat, dct);
/* DIMMAutoSpeed == 4 appears to be the lowest (DDR3-800) grade here —
 * TODO(review) confirm why PHY compensation is only done in that case. */
1499 if (pDCTstat->DIMMAutoSpeed == 4)
1500 InitPhyCompensation(pMCTstat, pDCTstat, dct);
1501 mctHookAfterPSCfg();
1503 return pDCTstat->ErrCode;
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (initializers for DramConfigLo/Hi, loop headers, braces).
 * Comments only added; code tokens byte-identical. */
/* AutoConfig_D: build and program the per-DCT DRAM Control (F2x78), Timing
 * Low (F2x88), Config Lo/Hi (F2x90/94) and Misc (F2xA0/A8) registers from
 * the gathered SPD/status data. Returns ErrCode. */
1506 static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
1507 struct DCTStatStruc *pDCTstat, u8 dct)
1509 u32 DramControl, DramTimingLo, Status;
1510 u32 DramConfigLo, DramConfigHi, DramConfigMisc, DramConfigMisc2;
1521 DramConfigMisc2 = 0;
1523 /* set bank addressing and Masks, plus CS pops */
1524 SPDSetBanks_D(pMCTstat, pDCTstat, dct);
1525 if (pDCTstat->ErrCode == SC_StopError)
1526 goto AutoConfig_exit;
1528 /* map chip-selects into local address space */
1529 StitchMemory_D(pMCTstat, pDCTstat, dct);
1530 InterleaveBanks_D(pMCTstat, pDCTstat, dct);
1532 /* temp image of status (for convenience). RO usage! */
1533 Status = pDCTstat->Status;
1535 dev = pDCTstat->dev_dct;
1536 reg_off = 0x100 * dct;
1539 /* Build Dram Control Register Value */
1540 DramConfigMisc2 = Get_NB32 (dev, 0xA8 + reg_off); /* Dram Control*/
1541 DramControl = Get_NB32 (dev, 0x78 + reg_off); /* Dram Control*/
1543 /* FIXME: Skip mct_checkForDxSupport */
1544 /* REV_CALL mct_DoRdPtrInit if not Dx */
1545 if (pDCTstat->LogicalCPUID & AMD_DR_Bx)
1549 DramControl &= ~0xFF;
1550 DramControl |= val; /* RdPtrInit = 6 for Cx CPU */
1552 if (mctGet_NVbits(NV_CLKHZAltVidC3))
1553 DramControl |= 1<<16; /* check */
1555 DramControl |= 0x00002A00;
1557 /* FIXME: Skip for Ax versions */
1558 /* callback not required - if (!mctParityControl_D()) */
1559 if (Status & (1 << SB_128bitmode))
1560 DramConfigLo |= 1 << Width128; /* 128-bit mode (normal) */
1565 if (pDCTstat->Dimmx4Present & (1 << word))
1566 DramConfigLo |= 1 << dword; /* X4Dimm[3:0] */
1572 if (!(Status & (1 << SB_Registered)))
1573 DramConfigLo |= 1 << UnBuffDimm; /* Unbuffered DIMMs */
/* ECC is enabled only when HW is capable AND all DIMMs are ECC AND the
 * NVRAM option requests it. */
1575 if (mctGet_NVbits(NV_ECC_CAP))
1576 if (Status & (1 << SB_ECCDIMMs))
1577 if ( mctGet_NVbits(NV_ECC))
1578 DramConfigLo |= 1 << DimmEcEn;
1580 DramConfigLo = mct_DisDllShutdownSR(pMCTstat, pDCTstat, DramConfigLo, dct);
1582 /* Build Dram Config Hi Register Value */
1583 dword = pDCTstat->Speed;
1584 DramConfigHi |= dword - 1; /* get MemClk encoding */
1585 DramConfigHi |= 1 << MemClkFreqVal;
1587 if (Status & (1 << SB_Registered))
1588 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0))
1589 /* set only if x8 Registered DIMMs in System*/
1590 DramConfigHi |= 1 << RDqsEn;
1592 if (mctGet_NVbits(NV_CKE_CTL))
1593 /*Chip Select control of CKE*/
1594 DramConfigHi |= 1 << 16;
1596 /* Control Bank Swizzle */
1597 if (0) /* call back not needed mctBankSwizzleControl_D()) */
1598 DramConfigHi &= ~(1 << BankSwizzleMode);
1600 DramConfigHi |= 1 << BankSwizzleMode; /* recommended setting (default) */
1602 /* Check for Quadrank DIMM presence */
1603 if ( pDCTstat->DimmQRPresent != 0) {
1604 byte = mctGet_NVbits(NV_4RANKType);
1606 DramConfigHi |= 1 << 17; /* S4 (4-Rank SO-DIMMs) */
1608 DramConfigHi |= 1 << 18; /* R4 (4-Rank Registered DIMMs) */
1611 if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
1612 val = mctGet_NVbits(NV_BYPMAX);
1614 val = 0x0f; /* recommended setting (default) */
1615 DramConfigHi |= val << 24;
1617 if (pDCTstat->LogicalCPUID & (AMD_DR_Cx | AMD_DR_Bx))
1618 DramConfigHi |= 1 << DcqArbBypassEn;
1620 /* Build MemClkDis Value from Dram Timing Lo and
1621 Dram Config Misc Registers
1622 1. We will assume that MemClkDis field has been preset prior to this
1624 2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
1625 NV_AllMemClks <>0 AND SB_DiagClks ==0 */
1627 /* Dram Timing Low (owns Clock Enable bits) */
1628 DramTimingLo = Get_NB32(dev, 0x88 + reg_off);
1629 if (mctGet_NVbits(NV_AllMemClks) == 0) {
1630 /* Special Jedec SPD diagnostic bit - "enable all clocks" */
1631 if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
/* Package-type specific MemClkDis lookup table. */
1634 p = Tab_ManualCLKDis;
1637 byte = mctGet_NVbits(NV_PACK_TYPE);
1640 else if (byte == PT_M2 || byte == PT_AS)
1647 while(dword < MAX_CS_SUPPORTED) {
1648 if (pDCTstat->CSPresent & (1<<dword)){
1649 /* re-enable clocks for the enabled CS */
1655 DramTimingLo |= byte << 24;
1659 printk(BIOS_DEBUG, "AutoConfig_D: DramControl: %x\n", DramControl);
1660 printk(BIOS_DEBUG, "AutoConfig_D: DramTimingLo: %x\n", DramTimingLo);
1661 printk(BIOS_DEBUG, "AutoConfig_D: DramConfigMisc: %x\n", DramConfigMisc);
1662 printk(BIOS_DEBUG, "AutoConfig_D: DramConfigMisc2: %x\n", DramConfigMisc2);
1663 printk(BIOS_DEBUG, "AutoConfig_D: DramConfigLo: %x\n", DramConfigLo);
1664 printk(BIOS_DEBUG, "AutoConfig_D: DramConfigHi: %x\n", DramConfigHi);
1666 /* Write Values to the registers */
1667 Set_NB32(dev, 0x78 + reg_off, DramControl);
1668 Set_NB32(dev, 0x88 + reg_off, DramTimingLo);
1669 Set_NB32(dev, 0xA0 + reg_off, DramConfigMisc);
1670 DramConfigMisc2 = mct_SetDramConfigMisc2(pDCTstat, dct, DramConfigMisc2);
1671 Set_NB32(dev, 0xA8 + reg_off, DramConfigMisc2);
1672 Set_NB32(dev, 0x90 + reg_off, DramConfigLo);
1673 ProgDramMRSReg_D(pMCTstat, pDCTstat, dct);
/* Merge the built Config Hi bits over the current register value. */
1674 dword = Get_NB32(dev, 0x94 + reg_off);
1675 DramConfigHi |= dword;
1676 mct_SetDramConfigHi_D(pDCTstat, dct, DramConfigHi);
1677 mct_EarlyArbEn_D(pMCTstat, pDCTstat);
1678 mctHookAfterAutoCfg();
1680 /* dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2)); */
1682 printk(BIOS_DEBUG, "AutoConfig: Status %x\n", pDCTstat->Status);
1683 printk(BIOS_DEBUG, "AutoConfig: ErrStatus %x\n", pDCTstat->ErrStatus);
1684 printk(BIOS_DEBUG, "AutoConfig: ErrCode %x\n", pDCTstat->ErrCode);
1685 printk(BIOS_DEBUG, "AutoConfig: Done\n\n");
1687 return pDCTstat->ErrCode;
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (e.g. the Banks << 2 encode step, Get_NB32 for the mask
 * register, closing braces). Comments only added; code tokens byte-identical. */
1690 static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
1691 struct DCTStatStruc *pDCTstat, u8 dct)
1693 /* Set bank addressing, program Mask values and build a chip-select
1694 * population map. This routine programs PCI 0:24N:2x80 config register
1695 * and PCI 0:24N:2x60,64,68,6C config registers (CS Mask 0-3).
1697 u8 ChipSel, Rows, Cols, Ranks, Banks;
1698 u32 BankAddrReg, csMask;
1709 dev = pDCTstat->dev_dct;
1710 reg_off = 0x100 * dct;
/* Chip-selects are handled in even/odd pairs (one DIMM = up to 2 ranks). */
1713 for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel+=2) {
1715 if ((pDCTstat->Status & (1 << SB_64MuxedMode)) && ChipSel >=4)
1718 if (pDCTstat->DIMMValid & (1<<byte)) {
1719 smbaddr = Get_DIMMAddress_D(pDCTstat, (ChipSel + dct));
1721 byte = mctRead_SPD(smbaddr, SPD_Addressing);
1722 Rows = (byte >> 3) & 0x7; /* Rows:0b=12-bit,... */
1723 Cols = byte & 0x7; /* Cols:0b=9-bit,... */
1725 byte = mctRead_SPD(smbaddr, SPD_Density);
1726 Banks = (byte >> 4) & 7; /* Banks:0b=3-bit,... */
1728 byte = mctRead_SPD(smbaddr, SPD_Organization);
1729 Ranks = ((byte >> 3) & 7) + 1;
1731 /* Configure Bank encoding
1732 * Use a 6-bit key into a lookup table.
1733 * Key (index) = RRRBCC, where CC is the number of Columns minus 9,
1734 * RRR is the number of Rows minus 12, and B is the number of banks
1741 byte |= Rows << 3; /* RRRBCC internal encode */
1743 for (dword=0; dword < 13; dword++) {
1744 if (byte == Tab_BankAddr[dword])
1751 /* bit no. of CS field in address mapping reg.*/
1752 dword <<= (ChipSel<<1);
1753 BankAddrReg |= dword;
1755 /* Mask value=(2pow(rows+cols+banks+3)-1)>>8,
1756 or 2pow(rows+cols+banks-5)-1*/
1759 byte = Rows + Cols; /* cl=rows+cols*/
1760 byte += 21; /* row:12+col:9 */
1761 byte -= 2; /* 3 banks - 5 */
1763 if (pDCTstat->Status & (1 << SB_128bitmode))
1764 byte++; /* double mask size if in 128-bit mode*/
1766 csMask |= 1 << byte;
1769 /*set ChipSelect population indicator even bits*/
1770 pDCTstat->CSPresent |= (1<<ChipSel);
1772 /*set ChipSelect population indicator odd bits*/
1773 pDCTstat->CSPresent |= 1 << (ChipSel + 1);
1775 reg = 0x60+(ChipSel<<1) + reg_off; /*Dram CS Mask Register */
1777 val &= 0x1FF83FE0; /* Mask out reserved bits.*/
1778 Set_NB32(dev, reg, val);
/* SPD checksum error on this DIMM marks its chip-select as failed. */
1780 if (pDCTstat->DIMMSPDCSE & (1<<ChipSel))
1781 pDCTstat->CSTestFail |= (1<<ChipSel);
1783 } /* while ChipSel*/
1785 SetCSTriState(pMCTstat, pDCTstat, dct);
1786 SetCKETriState(pMCTstat, pDCTstat, dct);
1787 SetODTTriState(pMCTstat, pDCTstat, dct);
1789 if (pDCTstat->Status & (1 << SB_128bitmode)) {
1790 SetCSTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
1791 SetCKETriState(pMCTstat, pDCTstat, 1); /* force dct1) */
1792 SetODTTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
1795 word = pDCTstat->CSPresent;
1796 mctGetCS_ExcludeMap(); /* mask out specified chip-selects */
1797 word ^= pDCTstat->CSPresent;
1798 pDCTstat->CSTestFail |= word; /* enable ODT to disabled DIMMs */
1799 if (!pDCTstat->CSPresent)
1800 pDCTstat->ErrCode = SC_StopError;
1802 reg = 0x80 + reg_off; /* Bank Addressing Register */
1803 Set_NB32(dev, reg, BankAddrReg);
1805 pDCTstat->CSPresent_DCT[dct] = pDCTstat->CSPresent;
1806 /* dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2)); */
1808 printk(BIOS_DEBUG, "SPDSetBanks: CSPresent %x\n", pDCTstat->CSPresent_DCT[dct]);
1809 printk(BIOS_DEBUG, "SPDSetBanks: Status %x\n", pDCTstat->Status);
1810 printk(BIOS_DEBUG, "SPDSetBanks: ErrStatus %x\n", pDCTstat->ErrStatus);
1811 printk(BIOS_DEBUG, "SPDSetBanks: ErrCode %x\n", pDCTstat->ErrCode);
1812 printk(BIOS_DEBUG, "SPDSetBanks: Done\n\n");
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (e.g. break statements after each mismatch, closing braces,
 * the final width-mode decision). Comments only added; code byte-identical. */
1815 static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
1816 struct DCTStatStruc *pDCTstat)
1818 /* Per SPDs, check the symmetry of DIMM pairs (DIMM on Channel A
1819 * matching with DIMM on Channel B), the overall DIMM population,
1820 * and determine the width mode: 64-bit, 64-bit muxed, 128-bit.
1823 u8 smbaddr, smbaddr1;
1826 /* Check Symmetry of Channel A and Channel B DIMMs
1827 (must be matched for 128-bit mode).*/
/* Even index = channel A slot, odd index = its channel B partner. */
1828 for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
1829 if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
1830 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
1831 smbaddr1 = Get_DIMMAddress_D(pDCTstat, i+1);
/* Compare addressing (rows/cols) of the pair. */
1833 byte = mctRead_SPD(smbaddr, SPD_Addressing) & 0x7;
1834 byte1 = mctRead_SPD(smbaddr1, SPD_Addressing) & 0x7;
1835 if (byte != byte1) {
1836 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* Compare density. */
1840 byte = mctRead_SPD(smbaddr, SPD_Density) & 0x0f;
1841 byte1 = mctRead_SPD(smbaddr1, SPD_Density) & 0x0f;
1842 if (byte != byte1) {
1843 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* Compare device width (x4/x8/x16). */
1847 byte = mctRead_SPD(smbaddr, SPD_Organization) & 0x7;
1848 byte1 = mctRead_SPD(smbaddr1, SPD_Organization) & 0x7;
1849 if (byte != byte1) {
1850 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* Compare rank count. */
1854 byte = (mctRead_SPD(smbaddr, SPD_Organization) >> 3) & 0x7;
1855 byte1 = (mctRead_SPD(smbaddr1, SPD_Organization) >> 3) & 0x7;
1856 if (byte != byte1) {
1857 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1861 byte = mctRead_SPD(smbaddr, SPD_DMBANKS) & 7; /* #ranks-1 */
1862 byte1 = mctRead_SPD(smbaddr1, SPD_DMBANKS) & 7; /* #ranks-1 */
1863 if (byte != byte1) {
1864 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (spare-CS selection, base-address arithmetic, braces).
 * Comments only added; code tokens byte-identical. */
/* StitchMemory_D: map enabled chip-selects into a contiguous local address
 * space (largest bank first) by programming DRAM CS Base registers
 * (F2x40..5C); records the total in pDCTstat->DCTSysLimit. */
1873 static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
1874 struct DCTStatStruc *pDCTstat, u8 dct)
1876 /* Requires that Mask values for each bank be programmed first and that
1877 * the chip-select population indicator is correctly set.
1880 u32 nxtcsBase, curcsBase;
1882 u32 Sizeq, BiggestBank;
1891 dev = pDCTstat->dev_dct;
1892 reg_off = 0x100 * dct;
1896 /* CS Sparing 1=enabled, 0=disabled */
1897 if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
1898 if (MCT_DIMM_SPARE_NO_WARM) {
1899 /* Do no warm-reset DIMM spare */
1900 if (pMCTstat->GStatus & 1 << GSB_EnDIMMSpareNW) {
1901 word = pDCTstat->CSPresent;
1905 /* Make sure at least two chip-selects are available */
1908 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1911 if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
1912 word = pDCTstat->CSPresent;
1914 word &= ~(1 << val);
1916 /* Make sure at least two chip-selects are available */
1919 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1924 nxtcsBase = 0; /* Next available cs base ADDR[39:8] */
/* Outer pass p picks a placement slot; inner pass q scans all CS for the
 * biggest not-yet-enabled bank to place next. */
1925 for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
1927 for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
1928 if (pDCTstat->CSPresent & (1 << q)) { /* bank present? */
1929 reg = 0x40 + (q << 2) + reg_off; /* Base[q] reg.*/
1930 val = Get_NB32(dev, reg);
1931 if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
1932 reg = 0x60 + (q << 1) + reg_off; /*Mask[q] reg.*/
1933 val = Get_NB32(dev, reg);
1937 Sizeq = val; /* never used */
1938 if (val > BiggestBank) {
1939 /*Bingo! possibly Map this chip-select next! */
1944 } /*if bank present */
1946 if (BiggestBank !=0) {
1947 curcsBase = nxtcsBase; /* curcsBase=nxtcsBase*/
1948 /* DRAM CS Base b Address Register offset */
1949 reg = 0x40 + (b << 2) + reg_off;
1952 val = 1 << Spare; /* Spare Enable*/
1955 val |= 1 << CSEnable; /* Bank Enable */
/* Odd CS on an unbuffered channel may need on-DIMM address
 * mirroring (per MirrPresU_NumRegR bitmap). */
1957 if (((reg - 0x40) >> 2) & 1) {
1958 if (!(pDCTstat->Status & (1 << SB_Registered))) {
1960 dimValid = pDCTstat->DIMMValid;
1963 if ((dimValid & pDCTstat->MirrPresU_NumRegR) != 0) {
1964 val |= 1 << onDimmMirror;
1968 Set_NB32(dev, reg, val);
1972 /* let nxtcsBase+=Size[b] */
1973 nxtcsBase += BiggestBank;
1976 /* bank present but disabled?*/
1977 if ( pDCTstat->CSTestFail & (1 << p)) {
1978 /* DRAM CS Base b Address Register offset */
1979 reg = (p << 2) + 0x40 + reg_off;
1980 val = 1 << TestFail;
1981 Set_NB32(dev, reg, val);
1986 pDCTstat->DCTSysLimit = nxtcsBase - 1;
1987 mct_AfterStitchMemory(pMCTstat, pDCTstat, dct);
1990 /* dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2)); */
1992 printk(BIOS_DEBUG, "StitchMemory: Status %x\n", pDCTstat->Status);
1993 printk(BIOS_DEBUG, "StitchMemory: ErrStatus %x\n", pDCTstat->ErrStatus);
1994 printk(BIOS_DEBUG, "StitchMemory: ErrCode %x\n", pDCTstat->ErrCode);
1995 printk(BIOS_DEBUG, "StitchMemory: Done\n\n");
1998 static u16 Get_Fk_D(u8 k)
2000 return Table_F_k[k]; /* FIXME: k or k<<1 ? */
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (variable declarations, else branches, closing braces).
 * Comments only added; code tokens byte-identical. */
2003 static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
2004 struct DCTStatStruc *pDCTstat)
2006 /* Check DIMMs present, verify checksum, flag SDRAM type,
2007 * build population indicator bitmaps, and preload bus loading
2008 * of DIMMs into DCTStatStruc.
2009 * MAAload=number of devices on the "A" bus.
2010 * MABload=number of devices on the "B" bus.
2011 * MAAdimms=number of DIMMs on the "A" bus slots.
2012 * MABdimms=number of DIMMs on the "B" bus slots.
2013 * DATAAload=number of ranks on the "A" bus slots.
2014 * DATABload=number of ranks on the "B" bus slots.
2019 u16 RegDIMMPresent, MaxDimms;
2024 /* preload data structure with addrs */
2025 mctGet_DIMMAddr(pDCTstat, pDCTstat->Node_ID);
2027 DimmSlots = MaxDimms = mctGet_NVbits(NV_MAX_DIMMS);
2029 SPDCtrl = mctGet_NVbits(NV_SPDCHK_RESTRT);
2032 pDCTstat->DimmQRPresent = 0;
/* Quad-rank DIMMs force a second pass: entries >= DimmSlots are the
 * virtual "upper two ranks" of a QR DIMM flagged below. */
2034 for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
2038 if ((pDCTstat->DimmQRPresent & (1 << i)) || (i < DimmSlots)) {
2040 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
2041 status = mctRead_SPD(smbaddr, SPD_ByteUse);
2042 if (status >= 0) { /* SPD access is ok */
2043 pDCTstat->DIMMPresent |= 1 << i;
2044 if (crcCheck(smbaddr)) { /* CRC is OK */
2045 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2046 if (byte == JED_DDR3SDRAM) {
2047 /*Dimm is 'Present'*/
2048 pDCTstat->DIMMValid |= 1 << i;
2051 pDCTstat->DIMMSPDCSE = 1 << i;
2053 pDCTstat->ErrStatus |= 1 << SB_DIMMChkSum;
2054 pDCTstat->ErrCode = SC_StopError;
2056 /*if NV_SPDCHK_RESTRT is set to 1, ignore faulty SPD checksum*/
2057 pDCTstat->ErrStatus |= 1<<SB_DIMMChkSum;
2058 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2059 if (byte == JED_DDR3SDRAM)
2060 pDCTstat->DIMMValid |= 1 << i;
2063 /* Check module type */
2064 byte = mctRead_SPD(smbaddr, SPD_DIMMTYPE) & 0x7;
2065 if (byte == JED_RDIMM || byte == JED_MiniRDIMM)
2066 RegDIMMPresent |= 1 << i;
2067 /* Check ECC capable */
2068 byte = mctRead_SPD(smbaddr, SPD_BusWidth);
2069 if (byte & JED_ECC) {
2070 /* DIMM is ECC capable */
2071 pDCTstat->DimmECCPresent |= 1 << i;
2073 /* Check if x4 device */
2074 devwidth = mctRead_SPD(smbaddr, SPD_Organization) & 0x7; /* 0:x4,1:x8,2:x16 */
2075 if (devwidth == 0) {
2076 /* DIMM is made with x4 or x16 drams */
2077 pDCTstat->Dimmx4Present |= 1 << i;
2078 } else if (devwidth == 1) {
2079 pDCTstat->Dimmx8Present |= 1 << i;
2080 } else if (devwidth == 2) {
2081 pDCTstat->Dimmx16Present |= 1 << i;
/* Rank count field (SPD organization bits [5:3]). */
2084 byte = (mctRead_SPD(smbaddr, SPD_Organization) >> 3);
2086 if (byte == 3) { /* 4ranks */
2087 /* if any DIMMs are QR, we have to make two passes through DIMMs*/
2088 if ( pDCTstat->DimmQRPresent == 0) {
2091 if (i < DimmSlots) {
2092 pDCTstat->DimmQRPresent |= (1 << i) | (1 << (i+4));
2094 pDCTstat->MAdimms[i & 1] --;
2096 byte = 1; /* upper two ranks of QR DIMM will be counted on another DIMM number iteration*/
2097 } else if (byte == 1) { /* 2ranks */
2098 pDCTstat->DimmDRPresent |= 1 << i;
2103 else if (devwidth == 1)
2105 else if (devwidth == 2)
2108 byte++; /* al+1=rank# */
2110 bytex <<= 1; /*double Addr bus load value for dual rank DIMMs*/
/* j selects the A (even slot) or B (odd slot) bus accumulators. */
2113 pDCTstat->DATAload[j] += byte; /*number of ranks on DATA bus*/
2114 pDCTstat->MAload[j] += bytex; /*number of devices on CMD/ADDR bus*/
2115 pDCTstat->MAdimms[j]++; /*number of DIMMs on A bus */
2117 /* check address mirror support for unbuffered dimm */
2118 /* check number of registers on a dimm for registered dimm */
2119 byte = mctRead_SPD(smbaddr, SPD_AddressMirror);
2120 if (RegDIMMPresent & (1 << i)) {
2122 pDCTstat->MirrPresU_NumRegR |= 1 << i;
2124 if ((byte & 1) == 1)
2125 pDCTstat->MirrPresU_NumRegR |= 1 << i;
2127 /* Get byte62: Reference Raw Card information. We dont need it now. */
2128 /* byte = mctRead_SPD(smbaddr, SPD_RefRawCard); */
2129 /* Get Byte65/66 for register manufacture ID code */
2130 if ((0x97 == mctRead_SPD(smbaddr, SPD_RegManufactureID_H)) &&
2131 (0x80 == mctRead_SPD(smbaddr, SPD_RegManufactureID_L))) {
2132 if (0x16 == mctRead_SPD(smbaddr, SPD_RegManRevID))
2133 pDCTstat->RegMan2Present |= 1 << i;
2135 pDCTstat->RegMan1Present |= 1 << i;
2137 /* Get Control word values for RC3. We dont need it. */
2138 byte = mctRead_SPD(smbaddr, 70);
2139 pDCTstat->CtrlWrd3 |= (byte >> 4) << (i << 2); /* C3 = SPD byte 70 [7:4] */
2140 /* Get Control word values for RC4, and RC5 */
2141 byte = mctRead_SPD(smbaddr, 71);
2142 pDCTstat->CtrlWrd4 |= (byte & 0xFF) << (i << 2); /* RC4 = SPD byte 71 [3:0] */
2143 pDCTstat->CtrlWrd5 |= (byte >> 4) << (i << 2); /* RC5 = SPD byte 71 [7:4] */
2147 printk(BIOS_DEBUG, "\t DIMMPresence: DIMMValid=%x\n", pDCTstat->DIMMValid);
2148 printk(BIOS_DEBUG, "\t DIMMPresence: DIMMPresent=%x\n", pDCTstat->DIMMPresent);
2149 printk(BIOS_DEBUG, "\t DIMMPresence: RegDIMMPresent=%x\n", RegDIMMPresent);
2150 printk(BIOS_DEBUG, "\t DIMMPresence: DimmECCPresent=%x\n", pDCTstat->DimmECCPresent);
2151 printk(BIOS_DEBUG, "\t DIMMPresence: DimmPARPresent=%x\n", pDCTstat->DimmPARPresent);
2152 printk(BIOS_DEBUG, "\t DIMMPresence: Dimmx4Present=%x\n", pDCTstat->Dimmx4Present);
2153 printk(BIOS_DEBUG, "\t DIMMPresence: Dimmx8Present=%x\n", pDCTstat->Dimmx8Present);
2154 printk(BIOS_DEBUG, "\t DIMMPresence: Dimmx16Present=%x\n", pDCTstat->Dimmx16Present);
2155 printk(BIOS_DEBUG, "\t DIMMPresence: DimmPlPresent=%x\n", pDCTstat->DimmPlPresent);
2156 printk(BIOS_DEBUG, "\t DIMMPresence: DimmDRPresent=%x\n", pDCTstat->DimmDRPresent);
2157 printk(BIOS_DEBUG, "\t DIMMPresence: DimmQRPresent=%x\n", pDCTstat->DimmQRPresent);
2158 printk(BIOS_DEBUG, "\t DIMMPresence: DATAload[0]=%x\n", pDCTstat->DATAload[0]);
2159 printk(BIOS_DEBUG, "\t DIMMPresence: MAload[0]=%x\n", pDCTstat->MAload[0]);
2160 printk(BIOS_DEBUG, "\t DIMMPresence: MAdimms[0]=%x\n", pDCTstat->MAdimms[0]);
2161 printk(BIOS_DEBUG, "\t DIMMPresence: DATAload[1]=%x\n", pDCTstat->DATAload[1]);
2162 printk(BIOS_DEBUG, "\t DIMMPresence: MAload[1]=%x\n", pDCTstat->MAload[1]);
2163 printk(BIOS_DEBUG, "\t DIMMPresence: MAdimms[1]=%x\n", pDCTstat->MAdimms[1]);
2165 if (pDCTstat->DIMMValid != 0) { /* If any DIMMs are present...*/
2166 if (RegDIMMPresent != 0) {
2167 if ((RegDIMMPresent ^ pDCTstat->DIMMValid) !=0) {
2168 /* module type DIMM mismatch (reg'ed, unbuffered) */
2169 pDCTstat->ErrStatus |= 1<<SB_DimmMismatchM;
2170 pDCTstat->ErrCode = SC_StopError;
2172 /* all DIMMs are registered */
2173 pDCTstat->Status |= 1<<SB_Registered;
2176 if (pDCTstat->DimmECCPresent != 0) {
2177 if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
2178 /* all DIMMs are ECC capable */
2179 pDCTstat->Status |= 1<<SB_ECCDIMMs;
2182 if (pDCTstat->DimmPARPresent != 0) {
2183 if ((pDCTstat->DimmPARPresent ^ pDCTstat->DIMMValid) == 0) {
2184 /*all DIMMs are Parity capable */
2185 pDCTstat->Status |= 1<<SB_PARDIMMs;
2189 /* no DIMMs present or no DIMMs that qualified. */
2190 pDCTstat->ErrStatus |= 1<<SB_NoDimms;
2191 pDCTstat->ErrCode = SC_StopError;
2194 printk(BIOS_DEBUG, "\t DIMMPresence: Status %x\n", pDCTstat->Status);
2195 printk(BIOS_DEBUG, "\t DIMMPresence: ErrStatus %x\n", pDCTstat->ErrStatus);
2196 printk(BIOS_DEBUG, "\t DIMMPresence: ErrCode %x\n", pDCTstat->ErrCode);
2197 printk(BIOS_DEBUG, "\t DIMMPresence: Done\n\n");
2199 mctHookAfterDIMMpre();
2201 return pDCTstat->ErrCode;
2204 static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i)
2208 p = pDCTstat->DIMMAddr;
2209 /* mct_BeforeGetDIMMAddress(); */
/* NOTE(review): chunk is corrupted — stray numeric line prefixes and missing
 * interior lines (local declarations, else branch that disables an unused
 * DCT1, closing braces). Comments only added; code tokens byte-identical. */
/* mct_initDCT: initialize DCT0, then DCT1 when unganged and populated. */
2213 static void mct_initDCT(struct MCTStatStruc *pMCTstat,
2214 struct DCTStatStruc *pDCTstat)
2219 /* Config. DCT0 for Ganged or unganged mode */
2220 DCTInit_D(pMCTstat, pDCTstat, 0);
2221 if (pDCTstat->ErrCode == SC_FatalErr) {
2222 /* Do nothing goto exitDCTInit; any fatal errors? */
2224 /* Configure DCT1 if unganged and enabled*/
2225 if (!pDCTstat->GangedMode) {
2226 if (pDCTstat->DIMMValidDCT[1] > 0) {
2227 err_code = pDCTstat->ErrCode; /* save DCT0 errors */
2228 pDCTstat->ErrCode = 0;
2229 DCTInit_D(pMCTstat, pDCTstat, 1);
2230 if (pDCTstat->ErrCode == 2) /* DCT1 is not Running */
2231 pDCTstat->ErrCode = err_code; /* Using DCT0 Error code to update pDCTstat.ErrCode */
/* No DIMMs on DCT1: disable its DRAM interface (F2x194). */
2233 val = 1 << DisDramInterface;
2234 Set_NB32(pDCTstat->dev_dct, 0x100 + 0x94, val);
/* Run software-based DRAM initialization for one DCT; the production
 * pre-init hook is applied first. Hardware-based init is commented out. */
2241 static void mct_DramInit(struct MCTStatStruc *pMCTstat,
2242 struct DCTStatStruc *pDCTstat, u8 dct)
2244 mct_BeforeDramInit_Prod_D(pMCTstat, pDCTstat);
2245 mct_DramInit_Sw_D(pMCTstat, pDCTstat, dct);
2246 /* mct_DramInit_Hw_D(pMCTstat, pDCTstat, dct); */
/* Split the combined DIMMValid map into per-channel populations (even bits
 * = channel A, odd bits = channel B) and enable ganged 128-bit mode when
 * the two channels match and NV_Unganged does not forbid it. Returns the
 * accumulated error code. NOTE(review): lines are elided in this listing
 * (e.g. the shift aligning channel B bits and the reg assignment). */
2249 static u8 mct_setMode(struct MCTStatStruc *pMCTstat,
2250 struct DCTStatStruc *pDCTstat)
2257 byte = bytex = pDCTstat->DIMMValid;
2258 bytex &= 0x55; /* CHA DIMM pop */
2259 pDCTstat->DIMMValidDCT[0] = bytex;
2261 byte &= 0xAA; /* CHB DIMM popa */
2263 pDCTstat->DIMMValidDCT[1] = byte;
2265 if (byte != bytex) {
2266 pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
2268 byte = mctGet_NVbits(NV_Unganged);
2270 pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO); /* Set temp. to avoid setting of ganged mode */
2272 if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
2273 pDCTstat->GangedMode = 1;
2274 /* valid 128-bit mode population. */
2275 pDCTstat->Status |= 1 << SB_128bitmode;
2277 val = Get_NB32(pDCTstat->dev_dct, reg);
2278 val |= 1 << DctGangEn;
2279 Set_NB32(pDCTstat->dev_dct, reg, val);
2281 if (byte) /* NV_Unganged */
2282 pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO); /* Clear so that there is no DIMM missmatch error */
2284 return pDCTstat->ErrCode;
/* Read a 32-bit northbridge PCI config register. */
2287 u32 Get_NB32(u32 dev, u32 reg)
2289 return pci_read_config32(dev, reg);
/* Write a 32-bit northbridge PCI config register. */
2292 void Set_NB32(u32 dev, u32 reg, u32 val)
2294 pci_write_config32(dev, reg, val);
/* Indirect register read: post 'index' to the index register, then read
 * the data port at index_reg + 4. No completion handshake — compare
 * Get_NB32_index_wait. */
2298 u32 Get_NB32_index(u32 dev, u32 index_reg, u32 index)
2302 Set_NB32(dev, index_reg, index);
2303 dword = Get_NB32(dev, index_reg+0x4);
/* Indirect register write: post 'index', then write 'data' to the data
 * port at index_reg + 4. No completion handshake. */
2308 void Set_NB32_index(u32 dev, u32 index_reg, u32 index, u32 data)
2310 Set_NB32(dev, index_reg, index);
2311 Set_NB32(dev, index_reg + 0x4, data);
/* Indirect register read with handshake: clear the write-direction bit,
 * post the index, poll until DctAccessDone is set, then read the data
 * port at index_reg + 4. */
2314 u32 Get_NB32_index_wait(u32 dev, u32 index_reg, u32 index)
2320 index &= ~(1 << DctAccessWrite);
2321 Set_NB32(dev, index_reg, index);
2323 dword = Get_NB32(dev, index_reg);
2324 } while (!(dword & (1 << DctAccessDone)));
2325 dword = Get_NB32(dev, index_reg + 0x4);
/* Indirect register write with handshake: stage the data in the data port,
 * post the index with the write-direction bit set, then poll until
 * DctAccessDone is set. */
2330 void Set_NB32_index_wait(u32 dev, u32 index_reg, u32 index, u32 data)
2335 Set_NB32(dev, index_reg + 0x4, data);
2336 index |= (1 << DctAccessWrite);
2337 Set_NB32(dev, index_reg, index);
2339 dword = Get_NB32(dev, index_reg);
2340 } while (!(dword & (1 << DctAccessDone)));
/* Program platform-specific drive-strength/timing values (CH_ODC_CTL and
 * CH_ADDR_TMG) into the DCT additional-data registers. In ganged mode the
 * channel setup is synchronized first. Returns the accumulated error code.
 * NOTE(review): the i_start/i_end selection lines are elided here. */
2344 static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
2345 struct DCTStatStruc *pDCTstat, u8 dct)
2347 /* Get platform specific config/timing values from the interface layer
2348 * and program them into DCT.
2351 u32 dev = pDCTstat->dev_dct;
2353 u8 i, i_start, i_end;
2355 if (pDCTstat->GangedMode) {
2356 SyncSetting(pDCTstat);
2357 /* mct_SetupSync_D */
2364 for (i=i_start; i<i_end; i++) {
2365 index_reg = 0x98 + (i * 0x100);
2366 Set_NB32_index_wait(dev, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]); /* Channel A Output Driver Compensation Control */
2367 Set_NB32_index_wait(dev, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]); /* Channel A Address Timing Control (index 0x04) */
2370 return pDCTstat->ErrCode;
/* Spin until this node's memory controller reports DramEnabled in F2x110;
 * only nodes that are present and have at least one DIMM are waited on. */
2374 static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat)
2379 if (pDCTstat->NodePresent) {
2380 dev = pDCTstat->dev_dct;
2382 if ((pDCTstat->DIMMValidDCT[0] ) || (pDCTstat->DIMMValidDCT[1])) { /* This Node has dram */
2384 val = Get_NB32(dev, 0x110);
2385 } while (!(val & (1 << DramEnabled)));
2387 } /* Node is present */
/* Refresh per-DCT state after CL/T detection: load the channel's DIMMValid
 * map and flag a stop error when the channel is empty. The second branch
 * (ganged path, lines partly elided) additionally clears CSPresent and
 * CSTestFail first. */
2390 static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
2391 struct DCTStatStruc *pDCTstat, u8 dct)
2393 if (!pDCTstat->GangedMode) {
2395 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2396 if (pDCTstat->DIMMValidDCT[dct] == 0)
2397 pDCTstat->ErrCode = SC_StopError;
2399 pDCTstat->CSPresent = 0;
2400 pDCTstat->CSTestFail = 0;
2401 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2402 if (pDCTstat->DIMMValidDCT[dct] == 0)
2403 pDCTstat->ErrCode = SC_StopError;
/* Compute DIMM data width from SPD, derive channel mode via mct_setMode,
 * and disable the DRAM interface (F2x94 / F2x194 DisDramInterface) for any
 * channel with no valid DIMMs. Returns the error code from mct_setMode.
 * NOTE(review): some lines elided; the trailing "Disable dram interface"
 * comment appears out of place in this listing. */
2408 static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
2409 struct DCTStatStruc *pDCTstat, u8 dct)
2415 SPDCalcWidth_D(pMCTstat, pDCTstat);
2416 ret = mct_setMode(pMCTstat, pDCTstat);
2418 ret = pDCTstat->ErrCode;
2421 if (pDCTstat->DIMMValidDCT[0] == 0) {
2422 val = Get_NB32(pDCTstat->dev_dct, 0x94);
2423 val |= 1 << DisDramInterface;
2424 Set_NB32(pDCTstat->dev_dct, 0x94, val);
2426 if (pDCTstat->DIMMValidDCT[1] == 0) {
2427 val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100);
2428 val |= 1 << DisDramInterface;
2429 Set_NB32(pDCTstat->dev_dct, 0x94 + 0x100, val);
2432 printk(BIOS_DEBUG, "SPDCalcWidth: Status %x\n", pDCTstat->Status);
2433 printk(BIOS_DEBUG, "SPDCalcWidth: ErrStatus %x\n", pDCTstat->ErrStatus);
2434 printk(BIOS_DEBUG, "SPDCalcWidth: ErrCode %x\n", pDCTstat->ErrCode);
2435 printk(BIOS_DEBUG, "SPDCalcWidth: Done\n");
2436 /* Disable dram interface before DRAM init */
/* After the node's DCT address maps are stitched together: account the DCT
 * limit into NodeSysLimit and, in unganged mode, program the DctSelBaseAddr
 * machinery (F2x110 and related) so accesses above DCT0's top route to
 * DCT1, honoring the software memory hole (NV_MemHole / NV_BottomIO) when
 * it intersects the boundary. NOTE(review): many register-offset lines are
 * elided in this listing, so the exact F2x110/F2x114 programming sequence
 * is only partially visible. */
2441 static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
2442 struct DCTStatStruc *pDCTstat, u8 dct)
2451 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
2452 DramHoleBase = mctGet_NVbits(NV_BottomIO);
2454 /* Increase hole size so;[31:24]to[31:16]
2455 * it has granularity of 128MB shl eax,8
2456 * Set 'effective' bottom IOmov DramHoleBase,eax
2458 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2460 /* In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
2461 if (!pDCTstat->GangedMode) {
2462 dev = pDCTstat->dev_dct;
2463 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2464 /* if DCT0 and DCT1 both exist, set DctSelBaseAddr[47:27] to the top of DCT0 */
2466 if (pDCTstat->DIMMValidDCT[1] > 0) {
2467 dword = pDCTstat->DCTSysLimit + 1;
2468 dword += pDCTstat->NodeSysBase;
2469 dword >>= 8; /* scale [39:8] to [47:27],and to F2x110[31:11] */
2470 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2471 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2472 val = pMCTstat->HoleBase;
2474 val = (((~val) & 0xFF) + 1);
2479 val = Get_NB32(dev, reg);
2482 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2483 Set_NB32(dev, reg, val);
2487 Set_NB32(dev, reg, val);
2490 /* Program the DctSelBaseAddr value to 0
2491 if DCT 0 is disabled */
2492 if (pDCTstat->DIMMValidDCT[0] == 0) {
2493 dword = pDCTstat->NodeSysBase;
2495 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2496 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2497 val = pMCTstat->HoleBase;
2500 val |= (((~val) & 0xFFFF) + 1);
2505 Set_NB32(dev, reg, val);
2508 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2509 Set_NB32(dev, reg, val);
2513 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2515 printk(BIOS_DEBUG, "AfterStitch pDCTstat->NodeSysBase = %x\n", pDCTstat->NodeSysBase);
2516 printk(BIOS_DEBUG, "mct_AfterStitchMemory: pDCTstat->NodeSysLimit = %x\n", pDCTstat->NodeSysLimit);
/* Thin wrapper around DIMMPresence_D; returns the node's error code.
 * NOTE(review): intermediate lines are elided. */
2519 static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
2520 struct DCTStatStruc *pDCTstat, u8 dct)
2525 ret = DIMMPresence_D(pMCTstat, pDCTstat);
2527 ret = pDCTstat->ErrCode;
2532 /* mct_BeforeGetDIMMAddress inline in C */
/* For every present node, program the secondary (inter-command) timings on
 * each populated DCT; DCT1 is skipped in ganged mode. */
2534 static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
2535 struct DCTStatStruc *pDCTstatA)
2539 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
2540 struct DCTStatStruc *pDCTstat;
2541 pDCTstat = pDCTstatA + Node;
2542 if (pDCTstat->NodePresent) {
2543 if (pDCTstat->DIMMValidDCT[0]) {
2544 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
2545 Set_OtherTiming(pMCTstat, pDCTstat, 0);
2547 if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
2548 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
2549 Set_OtherTiming(pMCTstat, pDCTstat, 1);
2551 } /* Node is present*/
/* Compute Trdrd/Twrwr/Twrrd/TrwtTO/TrwtWB turnaround timings for one DCT
 * and merge them into Dram Timing High (F2x[1,0]8C) and the F2x[1,0]78
 * register. NOTE(review): the shift/mask merge lines between each
 * "dword =" assignment and the final writes are elided in this listing. */
2555 static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
2556 struct DCTStatStruc *pDCTstat, u8 dct)
2559 u32 reg_off = 0x100 * dct;
2562 u32 dev = pDCTstat->dev_dct;
2564 Get_DqsRcvEnGross_Diff(pDCTstat, dev, 0x98 + reg_off);
2565 Get_WrDatGross_Diff(pDCTstat, dct, dev, 0x98 + reg_off);
2566 Get_Trdrd(pMCTstat, pDCTstat, dct);
2567 Get_Twrwr(pMCTstat, pDCTstat, dct);
2568 Get_Twrrd(pMCTstat, pDCTstat, dct);
2569 Get_TrwtTO(pMCTstat, pDCTstat, dct);
2570 Get_TrwtWB(pMCTstat, pDCTstat);
2572 reg = 0x8C + reg_off; /* Dram Timing Hi */
2573 val = Get_NB32(dev, reg);
2575 dword = pDCTstat->TrwtTO;
2577 dword = pDCTstat->Twrrd & 3;
2579 dword = pDCTstat->Twrwr & 3;
2581 dword = pDCTstat->Trdrd & 3;
2583 dword = pDCTstat->TrwtWB;
2585 Set_NB32(dev, reg, val);
2587 reg = 0x78 + reg_off;
2588 val = Get_NB32(dev, reg);
2590 dword = pDCTstat->Twrrd >> 2;
2592 dword = pDCTstat->Twrwr >> 2;
2594 dword = pDCTstat->Trdrd >> 2;
2596 Set_NB32(dev, reg, val);
/* Trdrd (read-to-read) = half the DqsRcvEn gross-delay spread, plus 1. */
2599 static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
2600 struct DCTStatStruc *pDCTstat, u8 dct)
2604 Trdrd = ((int8_t)(pDCTstat->DqsRcvEnGrossMax - pDCTstat->DqsRcvEnGrossMin) >> 1) + 1;
2607 pDCTstat->Trdrd = Trdrd;
/* Twrwr (write-to-write) = half the WrDat gross-delay spread, plus 2.
 * NOTE(review): clamping lines, if any, are elided in this listing. */
2610 static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
2611 struct DCTStatStruc *pDCTstat, u8 dct)
2615 Twrwr = ((int8_t)(pDCTstat->WrDatGrossMax - pDCTstat->WrDatGrossMin) >> 1) + 2;
2622 pDCTstat->Twrwr = Twrwr;
/* Twrrd (write-to-read) from the WrDat-max / DqsRcvEn-min spread, offset
 * by 4 minus the read/write latency difference; clamped to an upper bound
 * (the lower-bound clamp line is elided in this listing). */
2625 static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
2626 struct DCTStatStruc *pDCTstat, u8 dct)
2631 LDplus1 = Get_Latency_Diff(pMCTstat, pDCTstat, dct);
2633 Twrrd = ((int8_t)(pDCTstat->WrDatGrossMax - pDCTstat->DqsRcvEnGrossMin) >> 1) + 4 - LDplus1;
2637 else if (Twrrd > 10)
2639 pDCTstat->Twrrd = Twrrd;
/* TrwtTO (read-to-write turnaround) from the DqsRcvEn-max / WrDat-min
 * spread plus the latency difference. */
2642 static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
2643 struct DCTStatStruc *pDCTstat, u8 dct)
2648 LDplus1 = Get_Latency_Diff(pMCTstat, pDCTstat, dct);
2650 TrwtTO = ((int8_t)(pDCTstat->DqsRcvEnGrossMax - pDCTstat->WrDatGrossMin) >> 1) + LDplus1;
2652 pDCTstat->TrwtTO = TrwtTO;
/* TrwtWB is derived from TrwtTO (the comment says "one more than", but
 * the visible assignment copies TrwtTO directly; the +1 may be applied at
 * register-encode time — confirm against Set_OtherTiming's merge). */
2655 static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
2656 struct DCTStatStruc *pDCTstat)
2658 /* TrwtWB ensures read-to-write data-bus turnaround.
2659 This value should be one more than the programmed TrwtTO.*/
2660 pDCTstat->TrwtWB = pDCTstat->TrwtTO;
/* Read the CAS latency field (F2x[1,0]88[3:0]) and the write-latency
 * field (F2x[1,0]84[22:20]) for this DCT; the elided tail presumably
 * returns a value derived from their difference — confirm. */
2663 static u8 Get_Latency_Diff(struct MCTStatStruc *pMCTstat,
2664 struct DCTStatStruc *pDCTstat, u8 dct)
2666 u32 reg_off = 0x100 * dct;
2667 u32 dev = pDCTstat->dev_dct;
2670 val1 = Get_NB32(dev, reg_off + 0x88) & 0xF;
2671 val2 = (Get_NB32(dev, reg_off + 0x84) >> 20) & 7;
/* Scan the DqsRcvEn gross delays of all byte lanes (indices 0x10, 0x11,
 * 0x20, 0x21, plus 0x12 for the ECC lane when present) and record the
 * overall max/min in DqsRcvEnGrossMax/Min. This spread is the Critical
 * Gross Delay Difference (CGDD). NOTE(review): the "if (byte > Largest)"
 * update lines are elided in this listing for each lane pair. */
2676 static void Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
2677 u32 dev, u32 index_reg)
2679 u8 Smallest, Largest;
2683 /* The largest DqsRcvEnGrossDelay of any DIMM minus the
2684 DqsRcvEnGrossDelay of any other DIMM is equal to the Critical
2685 Gross Delay Difference (CGDD) */
2686 /* DqsRcvEn byte 1,0 */
2687 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x10);
2688 Largest = val & 0xFF;
2689 Smallest = (val >> 8) & 0xFF;
2691 /* DqsRcvEn byte 3,2 */
2692 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x11);
2694 bytex = (val >> 8) & 0xFF;
2695 if (bytex < Smallest)
2700 /* DqsRcvEn byte 5,4 */
2701 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x20);
2703 bytex = (val >> 8) & 0xFF;
2704 if (bytex < Smallest)
2709 /* DqsRcvEn byte 7,6 */
2710 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x21);
2712 bytex = (val >> 8) & 0xFF;
2713 if (bytex < Smallest)
2718 if (pDCTstat->DimmECCPresent> 0) {
2720 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x12);
2722 bytex = (val >> 8) & 0xFF;
2723 if (bytex < Smallest)
2729 pDCTstat->DqsRcvEnGrossMax = Largest;
2730 pDCTstat->DqsRcvEnGrossMin = Smallest;
/* Scan WrDatGrossDlyByte across the populated DIMMs (indices 0x01, 0x101;
 * and 0x201, 0x301 on rev-Cx parts which support four DIMMs per channel)
 * and record the overall max/min in WrDatGrossMax/Min (the CGDD for
 * writes). NOTE(review): the "if (byte > Largest)" update lines are
 * elided in this listing. */
2733 static void Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat,
2734 u8 dct, u32 dev, u32 index_reg)
2736 u8 Smallest, Largest;
2740 /* The largest WrDatGrossDlyByte of any DIMM minus the
2741 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
2742 if (pDCTstat->DIMMValid & (1 << 0)) {
2743 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x01); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
2744 Largest = val & 0xFF;
2745 Smallest = (val >> 8) & 0xFF;
2747 if (pDCTstat->DIMMValid & (1 << 2)) {
2748 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x101); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM1 */
2750 bytex = (val >> 8) & 0xFF;
2751 if (bytex < Smallest)
2757 /* If Cx, 2 more dimm need to be checked to find out the largest and smallest */
2758 if (pDCTstat->LogicalCPUID & AMD_DR_Cx) {
2759 if (pDCTstat->DIMMValid & (1 << 4)) {
2760 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x201); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM2 */
2762 bytex = (val >> 8) & 0xFF;
2763 if (bytex < Smallest)
2768 if (pDCTstat->DIMMValid & (1 << 6)) {
2769 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x301); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM3 */
2771 bytex = (val >> 8) & 0xFF;
2772 if (bytex < Smallest)
2779 pDCTstat->WrDatGrossMax = Largest;
2780 pDCTstat->WrDatGrossMin = Smallest;
/* For one DqsRcvEn index, walk the even chip selects that have a valid
 * DIMM and track the max/min gross delay of both halves of the register
 * (bits [12:5] and [28:21]); the elided tail presumably packs them as
 * (Smallest << 8) | Largest — confirm against callers. */
2783 static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
2784 u32 dev, u32 index_reg,
2787 u8 Smallest, Largest;
2800 for (i=0; i < 8; i+=2) {
2801 if ( pDCTstat->DIMMValid & (1 << i)) {
2802 val = Get_NB32_index_wait(dev, index_reg, index);
2804 byte = (val >> 5) & 0xFF;
2805 if (byte < Smallest)
2810 byte = (val >> (16 + 5)) & 0xFF;
2811 if (byte < Smallest)
/* For one WrDatGrossDlyByte index, read two consecutive registers (four
 * byte lanes each) plus the ECC lane when present, and track the max/min
 * gross delay. NOTE(review): the per-byte extraction, the "> Largest"
 * updates, and the packed return are elided in this listing. */
2827 static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
2828 u8 dct, u32 dev, u32 index_reg,
2831 u8 Smallest, Largest;
2839 for (i=0; i < 2; i++) {
2840 val = Get_NB32_index_wait(dev, index_reg, index);
2843 for (j=0; j < 4; j++) {
2845 if (byte < Smallest)
2854 if (pDCTstat->DimmECCPresent > 0) {
2856 val = Get_NB32_index_wait(dev, index_reg, index);
2860 if (byte < Smallest)
/* Final MCT cleanup: clear WbEnhWsbDis; clearing ClLinesToNb is deferred
 * until execution leaves ROM. */
2873 static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
2874 struct DCTStatStruc *pDCTstat)
2876 /* ClrClToNB_D postponed until we're done executing from ROM */
2877 mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
/* Initial MCT setup: set the ClLinesToNb and WbEnhWsbDis workaround bits
 * (undone later by mct_FinalMCT_D / mct_ClrClToNB_D). */
2880 static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat)
2882 mct_SetClToNB_D(pMCTstat, pDCTstat);
2883 mct_SetWbEnhWsbDis_D(pMCTstat, pDCTstat);
/* Return a value identifying present nodes; the body is elided in this
 * listing, so the exact encoding cannot be determined here. */
2886 static u32 mct_NodePresent_D(void)
/* Per-node bring-up defaults: unganged until proven otherwise, DR present;
 * records SB_ExtConfig when MSR bit 46 (extended PCI config access) is
 * already set, then writes the MSR back. NOTE(review): the MSR address
 * assignment and the bit-setting branch are elided in this listing. */
2893 static void mct_init(struct MCTStatStruc *pMCTstat,
2894 struct DCTStatStruc *pDCTstat)
2899 pDCTstat->GangedMode = 0;
2900 pDCTstat->DRPresent = 1;
2902 /* enable extend PCI configuration access */
2904 _RDMSR(addr, &lo, &hi);
2905 if (hi & (1 << (46-32))) {
2906 pDCTstat->Status |= 1 << SB_ExtConfig;
2909 _WRMSR(addr, lo, hi);
/* Clear the LegacyBiosMode bit in both DCTs' configuration registers (the
 * reg assignments for each DCT are elided in this listing). */
2913 static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
2914 struct DCTStatStruc *pDCTstat)
2918 u32 dev = pDCTstat->dev_dct;
2920 /* Clear Legacy BIOS Mode bit */
2922 val = Get_NB32(dev, reg);
2923 val &= ~(1<<LegacyBiosMode);
2924 Set_NB32(dev, reg, val);
2927 val = Get_NB32(dev, reg);
2928 val &= ~(1<<LegacyBiosMode);
2929 Set_NB32(dev, reg, val);
/* Copy each node's DRAM base/limit from Node0's F1x40/F1x44 map registers
 * into the extended-address-map registers F1x120/F1x124 on that node, and
 * program the memory-hoist fields (DramMemHoistValid, hole base) when the
 * hardware memory hole is active. NOTE(review): the merge/shift lines
 * between each read-modify-write are elided in this listing. */
2932 static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
2933 struct DCTStatStruc *pDCTstatA)
2936 u32 Drambase, Dramlimit;
2942 struct DCTStatStruc *pDCTstat;
2944 pDCTstat = pDCTstatA + 0;
2945 dev = pDCTstat->dev_map;
2947 /* Copy dram map from F1x40/44,F1x48/4c,
2948 to F1x120/124(Node0),F1x120/124(Node1),...*/
2949 for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
2950 pDCTstat = pDCTstatA + Node;
2951 devx = pDCTstat->dev_map;
2953 /* get base/limit from Node0 */
2954 reg = 0x40 + (Node << 3); /* Node0/Dram Base 0 */
2955 val = Get_NB32(dev, reg);
2956 Drambase = val >> ( 16 + 3);
2958 reg = 0x44 + (Node << 3); /* Node0/Dram Limit 0 */
2959 val = Get_NB32(dev, reg);
2960 Dramlimit = val >> (16 + 3);
2962 /* set base/limit to F1x120/124 per Node */
2963 if (pDCTstat->NodePresent) {
2964 reg = 0x120; /* F1x120,DramBase[47:27] */
2965 val = Get_NB32(devx, reg);
2968 Set_NB32(devx, reg, val);
2971 val = Get_NB32(devx, reg);
2974 Set_NB32(devx, reg, val);
2976 if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
2978 val = Get_NB32(devx, reg);
2979 val |= (1 << DramMemHoistValid);
2980 val &= ~(0xFF << 24);
2981 dword = (pMCTstat->HoleBase >> (24 - 8)) & 0xFF;
2984 Set_NB32(devx, reg, val);
/* Tri-state the chip selects with no DIMM behind them (board provides
 * termination). For registered DIMMs the odd CS of each pair follows the
 * even one. The mask is inverted so set bits mean "unused CS".
 * NOTE(review): the index value and merge of 'word' into 'val' are
 * elided in this listing. */
2991 static void SetCSTriState(struct MCTStatStruc *pMCTstat,
2992 struct DCTStatStruc *pDCTstat, u8 dct)
2995 u32 dev = pDCTstat->dev_dct;
2996 u32 index_reg = 0x98 + 0x100 * dct;
3000 /* Tri-state unused chipselects when motherboard
3001 termination is available */
3003 /* FIXME: skip for Ax */
3005 word = pDCTstat->CSPresent;
3006 if (pDCTstat->Status & (1 << SB_Registered)) {
3007 word |= (word & 0x55) << 1;
3009 word = (~word) & 0xFF;
3011 val = Get_NB32_index_wait(dev, index_reg, index);
3013 Set_NB32_index_wait(dev, index_reg, index, val);
/* Tri-state an unused CKE line per channel half: even-CS ranks drive one
 * CKE, odd-CS ranks the other; if a whole half is unpopulated its CKE is
 * tri-stated. NOTE(review): the bit-set lines inside each if and the
 * index value are elided in this listing. */
3016 static void SetCKETriState(struct MCTStatStruc *pMCTstat,
3017 struct DCTStatStruc *pDCTstat, u8 dct)
3021 u32 index_reg = 0x98 + 0x100 * dct;
3025 /* Tri-state unused CKEs when motherboard termination is available */
3027 /* FIXME: skip for Ax */
3029 dev = pDCTstat->dev_dct;
3030 word = pDCTstat->CSPresent;
3033 val = Get_NB32_index_wait(dev, index_reg, index);
3034 if ((word & 0x55) == 0)
3037 if ((word & 0xAA) == 0)
3040 Set_NB32_index_wait(dev, index_reg, index, val);
/* Tri-state the ODT pins of unpopulated ranks. Registered platforms walk
 * the even chip selects (plus the odd mate on quad-rank-capable boards);
 * the AM3 path derives the mask from ~CSPresent (elided lines 3075-3081
 * presumably perform the bit-1/bit-2 swap the comment refers to). */
3043 static void SetODTTriState(struct MCTStatStruc *pMCTstat,
3044 struct DCTStatStruc *pDCTstat, u8 dct)
3048 u32 index_reg = 0x98 + 0x100 * dct;
3054 /* FIXME: skip for Ax */
3056 dev = pDCTstat->dev_dct;
3058 /* Tri-state unused ODTs when motherboard termination is available */
3059 max_dimms = (u8) mctGet_NVbits(NV_MAX_DIMMS);
3060 odt = 0x0F; /* ODT tri-state setting */
3062 if (pDCTstat->Status & (1 <<SB_Registered)) {
3063 for (cs = 0; cs < 8; cs += 2) {
3064 if (pDCTstat->CSPresent & (1 << cs)) {
3065 odt &= ~(1 << (cs / 2));
3066 if (mctGet_NVbits(NV_4RANKType) != 0) { /* quad-rank capable platform */
3067 if (pDCTstat->CSPresent & (1 << (cs + 1)))
3068 odt &= ~(4 << (cs / 2));
3072 } else { /* AM3 package */
3073 val = ~(pDCTstat->CSPresent);
3074 odt = val & 9; /* swap bits 1 and 2 */
3082 val = Get_NB32_index_wait(dev, index_reg, index);
3083 val |= ((odt & 0xFF) << 8); /* set bits 11:8 ODTTriState[3:0] */
3084 Set_NB32_index_wait(dev, index_reg, index, val);
/* Build the phy slew-rate compensation word from the drive-strength fields
 * read back at additional-data index 0x00, using the rise/fall lookup
 * tables, then write it to index 0x0a. A 4-dimm-per-channel override at
 * DDR2-800+ speeds masks part of the word and forces DCT0's index
 * register. NOTE(review): loop-internal selection lines are elided. */
3088 static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
3089 struct DCTStatStruc *pDCTstat, u8 dct)
3092 u32 index_reg = 0x98 + 0x100 * dct;
3093 u32 dev = pDCTstat->dev_dct;
3099 val = Get_NB32_index_wait(dev, index_reg, 0x00);
3101 for (i=0; i < 6; i++) {
3105 p = Table_Comp_Rise_Slew_15x;
3106 valx = p[(val >> 16) & 3];
3110 p = Table_Comp_Fall_Slew_15x;
3111 valx = p[(val >> 16) & 3];
3114 p = Table_Comp_Rise_Slew_20x;
3115 valx = p[(val >> 8) & 3];
3118 p = Table_Comp_Fall_Slew_20x;
3119 valx = p[(val >> 8) & 3];
3123 dword |= valx << (5 * i);
3126 /* Override/Exception */
3127 if (!pDCTstat->GangedMode) {
3128 i = 0; /* use i for the dct setting required */
3129 if (pDCTstat->MAdimms[0] < 4)
3131 if (((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) && (pDCTstat->MAdimms[i] == 4)) {
3132 dword &= 0xF18FFF18;
3133 index_reg = 0x98; /* force dct = 0 */
3137 Set_NB32_index_wait(dev, index_reg, 0x0a, dword);
/* Set F2x[1,0]78[EarlyArbEn] on Bx/Cx silicon unconditionally, or on other
 * revisions only when the NB CLK : MemClk ratio check passes. */
3140 static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
3141 struct DCTStatStruc *pDCTstat)
3145 u32 dev = pDCTstat->dev_dct;
3147 /* GhEnhancement #18429 modified by askar: For low NB CLK :
3148 * Memclk ratio, the DCT may need to arbitrate early to avoid
3149 * unnecessary bubbles.
3150 * bit 19 of F2x[1,0]78 Dram Control Register, set this bit only when
3151 * NB CLK : Memclk ratio is between 3:1 (inclusive) to 4:5 (inclusive)
3154 val = Get_NB32(dev, reg);
3156 if (pDCTstat->LogicalCPUID & (AMD_DR_Bx | AMD_DR_Cx))
3157 val |= (1 << EarlyArbEn);
3158 else if (CheckNBCOFEarlyArbEn(pMCTstat, pDCTstat))
3159 val |= (1 << EarlyArbEn);
3161 Set_NB32(dev, reg, val);
/* Decide whether EarlyArbEn applies by computing the NB COF : MemClk
 * ratio (NBFid from MSR C001_0071, MemClkFreq from F2x94, falling back to
 * DCT1 when DCT0's clock is not valid). Returns 1 when the ratio is in
 * [3:1, 4.5:1]. NOTE(review): the divide producing 'val'/'rem' and the
 * 'tmp' derivation are elided in this listing. */
3164 static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
3165 struct DCTStatStruc *pDCTstat)
3171 u32 dev = pDCTstat->dev_dct;
3175 /* Check if NB COF >= 4*Memclk, if it is not, return a fatal error
3178 /* 3*(Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
3179 _RDMSR(0xC0010071, &lo, &hi);
3184 val = Get_NB32(dev, reg);
3185 if (!(val & (1 << MemClkFreqVal)))
3186 val = Get_NB32(dev, reg + 0x100); /* get the DCT1 value */
3194 dev = pDCTstat->dev_nbmisc;
3196 val = Get_NB32(dev, reg);
3204 /* Yes this could be nicer but this was how the asm was.... */
3205 if (val < 3) { /* NClk:MemClk < 3:1 */
3207 } else if (val > 4) { /* NClk:MemClk >= 5:1 */
3209 } else if ((val == 4) && (rem > tmp)) { /* NClk:MemClk > 4.5:1 */
3212 return 1; /* 3:1 <= NClk:MemClk <= 4.5:1*/
/* Zero the MCT status structure and (for each of the 8 nodes) the two
 * clearable regions of the DCT status structure, preserving the
 * HostBiosSrvc1/2 fields across the wipe. The preserved middle region is
 * bounded by CH_MaxRdLat[2] and CH_D_BC_RCVRDLY[2][4] offsets computed
 * via the classic null-pointer offsetof idiom. */
3216 static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
3217 struct DCTStatStruc *pDCTstatA)
3221 struct DCTStatStruc *pDCTstat;
3224 u16 host_serv1, host_serv2;
3226 /* Initialize Data structures by clearing all entries to 0 */
3227 p = (u8 *) pMCTstat;
3228 for (i = 0; i < sizeof(struct MCTStatStruc); i++) {
3232 for (Node = 0; Node < 8; Node++) {
3233 pDCTstat = pDCTstatA + Node;
3234 host_serv1 = pDCTstat->HostBiosSrvc1;
3235 host_serv2 = pDCTstat->HostBiosSrvc2;
3237 p = (u8 *) pDCTstat;
3239 stop = ((u32) &((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
3240 for (i = start; i < stop ; i++) {
3244 start = ((u32) &((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
3245 stop = sizeof(struct DCTStatStruc);
3246 for (i = start; i < stop; i++) {
3249 pDCTstat->HostBiosSrvc1 = host_serv1;
3250 pDCTstat->HostBiosSrvc2 = host_serv2;
/* Rev-Dx pre-DRAM-init workaround: write a value (assignment to 'dword'
 * elided in this listing) through the additional-data port of both DCTs
 * using the 0x0D000030 / 0x4D040F30 index sequence. Applied only at
 * Speed == 3 per the visible condition. */
3254 static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
3255 struct DCTStatStruc *pDCTstat)
3259 u32 dev = pDCTstat->dev_dct;
3261 if (pDCTstat->LogicalCPUID & AMD_DR_Dx) {
3262 if ((pDCTstat->Speed == 3))
3266 for (i=0; i < 2; i++) {
3267 reg_off = 0x100 * i;
3268 Set_NB32(dev, 0x98 + reg_off, 0x0D000030);
3269 Set_NB32(dev, 0x9C + reg_off, dword);
3270 Set_NB32(dev, 0x98 + reg_off, 0x4D040F30);
/* On DA-C2 / RB-C3 silicon, program the phy registers x4D0FE006/x4D0FE007
 * via the additional-data port, then return DramConfigLo with bit 27
 * (DisDllShutdownSR) set so DLL shutdown in self-refresh stays disabled. */
3275 static u32 mct_DisDllShutdownSR(struct MCTStatStruc *pMCTstat,
3276 struct DCTStatStruc *pDCTstat, u32 DramConfigLo, u8 dct)
3278 u32 reg_off = 0x100 * dct;
3279 u32 dev = pDCTstat->dev_dct;
3281 /* Write 0000_07D0h to register F2x[1, 0]98_x4D0FE006 */
3282 if (pDCTstat->LogicalCPUID & (AMD_DA_C2 | AMD_RB_C3)) {
3283 Set_NB32(dev, 0x9C + reg_off, 0x1c);
3284 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE006);
3285 Set_NB32(dev, 0x9C + reg_off, 0x13d);
3286 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE007);
3289 return DramConfigLo | /* DisDllShutdownSR */ 1 << 27;
/* Set the ClLinesToNbDis MSR bit (the 'msr' address assignment is elided
 * in this listing); paired with mct_ClrClToNB_D. */
3292 void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat,
3293 struct DCTStatStruc *pDCTstat)
3298 /* FIXME: Maybe check the CPUID? - not for now. */
3299 /* pDCTstat->LogicalCPUID; */
3302 _RDMSR(msr, &lo, &hi);
3303 lo |= 1 << ClLinesToNbDis;
3304 _WRMSR(msr, lo, hi);
/* Clear the ClLinesToNbDis MSR bit, unless the ClToNB_flag indicates the
 * bit was already set before MCT touched it. */
3307 void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
3308 struct DCTStatStruc *pDCTstat)
3314 /* FIXME: Maybe check the CPUID? - not for now. */
3315 /* pDCTstat->LogicalCPUID; */
3318 _RDMSR(msr, &lo, &hi);
3319 if (!pDCTstat->ClToNB_flag)
3320 lo &= ~(1<<ClLinesToNbDis);
3321 _WRMSR(msr, lo, hi);
/* Set the WbEnhWsbDis_D bit in the high half of its MSR (address
 * assignment elided in this listing); paired with mct_ClrWbEnhWsbDis_D. */
3325 void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3326 struct DCTStatStruc *pDCTstat)
3331 /* FIXME: Maybe check the CPUID? - not for now. */
3332 /* pDCTstat->LogicalCPUID; */
3335 _RDMSR(msr, &lo, &hi);
3336 hi |= (1 << WbEnhWsbDis_D);
3337 _WRMSR(msr, lo, hi);
/* Clear the WbEnhWsbDis_D bit set by mct_SetWbEnhWsbDis_D. */
3340 void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3341 struct DCTStatStruc *pDCTstat)
3346 /* FIXME: Maybe check the CPUID? - not for now. */
3347 /* pDCTstat->LogicalCPUID; */
3350 _RDMSR(msr, &lo, &hi);
3351 hi &= ~(1 << WbEnhWsbDis_D);
3352 _WRMSR(msr, lo, hi);
/* Select the dynamic on-die-termination (RTT_WR) bits for a registered
 * DIMM based on slot count, population (CSPresent) and memory speed.
 * Bits 10/11 map to the DRAM-MRS dynamic-term field. NOTE(review): the
 * branch conditions between the visible lines (speed tests, else arms,
 * and the return) are elided in this listing. */
3355 static u32 mct_DramTermDyn_RDimm(struct MCTStatStruc *pMCTstat,
3356 struct DCTStatStruc *pDCTstat, u8 dimm)
3358 u8 DimmsInstalled = dimm;
3359 u32 DramTermDyn = 0;
3360 u8 Speed = pDCTstat->Speed;
3362 if (mctGet_NVbits(NV_MAX_DIMMS) == 4) {
3363 if (pDCTstat->CSPresent & 0xF0) {
3364 if (DimmsInstalled == 1)
3366 DramTermDyn |= 1 << 10;
3368 DramTermDyn |= 1 << 11;
3371 DramTermDyn |= 1 << 11;
3373 DramTermDyn |= 1 << 10;
3375 if (DimmsInstalled != 1) {
3377 DramTermDyn |= 1 << 10;
3379 DramTermDyn |= 1 << 11;
3383 if (DimmsInstalled != 1)
3384 DramTermDyn |= 1 << 11;
/* Assemble the DRAM MRS register value for one DCT — CKE control mode,
 * drive impedance, nominal and dynamic termination (per DIMM count and
 * speed), burst length and self-refresh bits — and merge it into
 * F2x[1,0]84. NOTE(review): several conditional arms and bit merges are
 * elided in this listing. */
3389 void ProgDramMRSReg_D(struct MCTStatStruc *pMCTstat,
3390 struct DCTStatStruc *pDCTstat, u8 dct)
3397 /* Set chip select CKE control mode */
3398 if (mctGet_NVbits(NV_CKE_CTL)) {
3399 if (pDCTstat->CSPresent == 3) {
3401 word = pDCTstat->DIMMSPDCSE;
3412 DrvImpCtrl: drive impedance control.01b(34 ohm driver; Ron34 = Rzq/7)
3415 /* Dram nominal termination: */
3416 byte = pDCTstat->MAdimms[dct];
3417 if (!(pDCTstat->Status & (1 << SB_Registered))) {
3418 DramMRS |= 1 << 7; /* 60 ohms */
3420 if (pDCTstat->Speed < 6)
3421 DramMRS |= 1 << 8; /* 40 ohms */
3423 DramMRS |= 1 << 9; /* 30 ohms */
3426 /* Dram dynamic termination: Disable(1DIMM), 120ohm(>=2DIMM) */
3427 if (!(pDCTstat->Status & (1 << SB_Registered))) {
3429 if (pDCTstat->Speed == 7)
3435 DramMRS |= mct_DramTermDyn_RDimm(pMCTstat, pDCTstat, byte);
3438 /* burst length control */
3439 if (pDCTstat->Status & (1 << SB_128bitmode))
3441 /* Qoff=0, output buffers enabled */
3443 DramMRS |= (pDCTstat->Speed - 4) << 20;
3444 /* ASR=1, auto self refresh */
3448 dword = Get_NB32(pDCTstat->dev_dct, 0x100 * dct + 0x84);
3449 dword &= ~0x00FC2F8F;
3451 Set_NB32(pDCTstat->dev_dct, 0x100 * dct + 0x84, dword);
/* Write DramConfigHi (F2x[1,0]94) with the erratum-177 / bug-15114
 * sequence: disable phy auto-compensation first (DisAutoComp via the
 * additional-data port) so a MemClk frequency change cannot corrupt a
 * compensation update; re-enable afterward (the re-enable and the
 * begin-compensation reset lines are elided in this listing). */
3454 void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct,
3457 /* Bug#15114: Comp. update interrupted by Freq. change can cause
3458 * subsequent update to be invalid during any MemClk frequency change:
3459 * Solution: From the bug report:
3460 * 1. A software-initiated frequency change should be wrapped into the
3461 * following sequence :
3462 * - a) Disable Compensation (F2[1, 0]9C_x08[30] )
3463 * b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
3464 * c) Do frequency change
3465 * d) Enable Compensation (F2[1, 0]9C_x08[30] )
3466 * 2. A software-initiated Disable Compensation should always be
3467 * followed by step b) of the above steps.
3468 * Silicon Status: Fixed In Rev B0
3470 * Errata#177: DRAM Phy Automatic Compensation Updates May Be Invalid
3471 * Solution: BIOS should disable the phy automatic compensation prior
3472 * to initiating a memory clock frequency change as follows:
3473 * 1. Disable PhyAutoComp by writing 1'b1 to F2x[1, 0]9C_x08[30]
3474 * 2. Reset the Begin Compensation bits by writing 32'h0 to
3475 * F2x[1, 0]9C_x4D004F00
3476 * 3. Perform frequency change
3477 * 4. Enable PhyAutoComp by writing 1'b0 to F2x[1, 0]9C_08[30]
3478 * In addition, any time software disables the automatic phy
3479 * compensation it should reset the begin compensation bit per step 2.
3480 * Silicon Status: Fixed in DR-B0
3483 u32 dev = pDCTstat->dev_dct;
3484 u32 index_reg = 0x98 + 0x100 * dct;
3490 val = Get_NB32_index_wait(dev, index_reg, index);
3491 if (!(val & (1 << DisAutoComp)))
3492 Set_NB32_index_wait(dev, index_reg, index, val | (1 << DisAutoComp));
3496 Set_NB32(dev, 0x94 + 0x100 * dct, DramConfigHi);
/* Pre-DQS-training fixups for every present node: the Bx-only training
 * sample workaround, then a DLL reset on both DCTs (bugs 15115/15880). */
3499 static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
3500 struct DCTStatStruc *pDCTstatA)
3503 struct DCTStatStruc *pDCTstat;
3507 * Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
3509 * Solution: BIOS should program DRAM Control Register[RdPtrInit] =
3510 * 5h, (F2x[1, 0]78[3:0] = 5h).
3511 * Silicon Status: Fixed In Rev B0
3513 * Bug#15880: Determine validity of reset settings for DDR PHY timing.
3514 * Solution: At least, set WrDqs fine delay to be 0 for DDR3 training.
3516 for (Node = 0; Node < 8; Node++) {
3517 pDCTstat = pDCTstatA + Node;
3519 if (pDCTstat->NodePresent) {
3520 mct_BeforeDQSTrainSamp(pDCTstat); /* only Bx */
3521 mct_ResetDLL_D(pMCTstat, pDCTstat, 0);
3522 mct_ResetDLL_D(pMCTstat, pDCTstat, 1);
/* Pulse the DLL reset (phy register x4D080F0C) for each enabled receiver
 * pair on one DCT, priming each with a dummy cached read first. Sets
 * HWCR.wrap32dis around the 64-bit memory references and restores it
 * afterward. Skipped entirely on B3 silicon. NOTE(review): the 'addr'
 * reload before the final MSR restore is elided in this listing. */
3527 static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
3528 struct DCTStatStruc *pDCTstat, u8 dct)
3531 u32 dev = pDCTstat->dev_dct;
3532 u32 reg_off = 0x100 * dct;
3538 /* Skip reset DLL for B3 */
3539 if (pDCTstat->LogicalCPUID & AMD_DR_B3) {
3544 _RDMSR(addr, &lo, &hi);
3545 if(lo & (1<<17)) { /* save the old value */
3548 lo |= (1<<17); /* HWCR.wrap32dis */
3549 /* Setting wrap32dis allows 64-bit memory references in 32bit mode */
3550 _WRMSR(addr, lo, hi);
3552 pDCTstat->Channel = dct;
3553 Receiver = mct_InitReceiver_D(pDCTstat, dct);
3554 /* there are four receiver pairs, loosely associated with chipselects.*/
3555 for (; Receiver < 8; Receiver += 2) {
3556 if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver)) {
3557 addr = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, dct, Receiver, &valid);
3559 mct_Read1LTestPattern_D(pMCTstat, pDCTstat, addr); /* cache fills */
3561 /* Write 0000_8000h to register F2x[1,0]9C_xD080F0C */
3562 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00008000);
3563 mct_Wait(80); /* wait >= 300ns */
3565 /* Write 0000_0000h to register F2x[1,0]9C_xD080F0C */
3566 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00000000);
3567 mct_Wait(800); /* wait >= 2us */
3575 _RDMSR(addr, &lo, &hi);
3576 lo &= ~(1<<17); /* restore HWCR.wrap32dis */
3577 _WRMSR(addr, lo, hi);
/* Enable channel data interleaving (F2x110[DctDatIntlv]) when unganged,
 * and set the NB Configuration High DisDatMask bit (bit 36, accessed as
 * bit 4 of the high dword at F3x8C). */
3581 static void mct_EnableDatIntlv_D(struct MCTStatStruc *pMCTstat,
3582 struct DCTStatStruc *pDCTstat)
3584 u32 dev = pDCTstat->dev_dct;
3587 /* Enable F2x110[DctDatIntlv] */
3588 /* Call back not required mctHookBeforeDatIntlv_D() */
3589 /* FIXME Skip for Ax */
3590 if (!pDCTstat->GangedMode) {
3591 val = Get_NB32(dev, 0x110);
3592 val |= 1 << 5; /* DctDatIntlv */
3593 Set_NB32(dev, 0x110, val);
3595 /* FIXME Skip for Cx */
3596 dev = pDCTstat->dev_nbmisc;
3597 val = Get_NB32(dev, 0x8C); /* NB Configuration Hi */
3598 val |= 1 << (36-32); /* DisDatMask */
3599 Set_NB32(dev, 0x8C, val);
/* At DDR3-1600+ (Speed >= 7), set the DLL speed-up (PowerDown, bit 13)
 * in phy registers x0D080F10/x0D080F11/x0D088F30/x0D08CF30 of this DCT
 * via the 0x98/0x9C indirect pair. The "val |= 1 << 13" lines between
 * each read and write-back are elided in this listing. */
3603 static void SetDllSpeedUp_D(struct MCTStatStruc *pMCTstat,
3604 struct DCTStatStruc *pDCTstat, u8 dct)
3607 u32 dev = pDCTstat->dev_dct;
3608 u32 reg_off = 0x100 * dct;
3610 if (pDCTstat->Speed >= 7) { /* DDR1600 and above */
3611 /* Set bit13 PowerDown to register F2x[1, 0]98_x0D080F10 */
3612 Set_NB32(dev, reg_off + 0x98, 0x0D080F10);
3613 val = Get_NB32(dev, reg_off + 0x9C);
3615 Set_NB32(dev, reg_off + 0x9C, val);
3616 Set_NB32(dev, reg_off + 0x98, 0x4D080F10);
3618 /* Set bit13 PowerDown to register F2x[1, 0]98_x0D080F11 */
3619 Set_NB32(dev, reg_off + 0x98, 0x0D080F11);
3620 val = Get_NB32(dev, reg_off + 0x9C);
3622 Set_NB32(dev, reg_off + 0x9C, val);
3623 Set_NB32(dev, reg_off + 0x98, 0x4D080F11);
3625 /* Set bit13 PowerDown to register F2x[1, 0]98_x0D088F30 */
3626 Set_NB32(dev, reg_off + 0x98, 0x0D088F30);
3627 val = Get_NB32(dev, reg_off + 0x9C);
3629 Set_NB32(dev, reg_off + 0x9C, val);
3630 Set_NB32(dev, reg_off + 0x98, 0x4D088F30);
3632 /* Set bit13 PowerDown to register F2x[1, 0]98_x0D08CF30 */
3633 Set_NB32(dev, reg_off + 0x98, 0x0D08CF30);
3634 val = Get_NB32(dev, reg_off + 0x9C);
3636 Set_NB32(dev, reg_off + 0x9C, val);
3637 Set_NB32(dev, reg_off + 0x98, 0x4D08CF30);
/* Set F2x78[ChSetupSync] when exactly one channel has all of its
 * AddrCmdSetup/CsOdtSetup/CkeSetup bits (mask 0x0202020 of CH_ADDR_TMG)
 * zero while the other channel has at least one set. */
3642 static void SyncSetting(struct DCTStatStruc *pDCTstat)
3644 /* set F2x78[ChSetupSync] when F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup,
3645 * CkeSetup] setups for one DCT are all 0s and at least one of the setups,
3646 * F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup, CkeSetup], of the other
3650 u32 dev = pDCTstat->dev_dct;
3653 cha = pDCTstat->CH_ADDR_TMG[0] & 0x0202020;
3654 chb = pDCTstat->CH_ADDR_TMG[1] & 0x0202020;
3656 if ((cha != chb) && ((cha == 0) || (chb == 0))) {
3657 val = Get_NB32(dev, 0x78);
3658 val |= 1 << ChSetupSync;
3659 Set_NB32(dev, 0x78, val);
/* B2/B3 erratum workaround after DRAM init: if DramEnabled has not come up
 * within ~50 us, toggle Width128 off, do a dummy phy CSR read, and (in
 * ganged mode) restore Width128. */
3663 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
3666 u32 reg_off = 0x100 * dct;
3667 u32 dev = pDCTstat->dev_dct;
3669 if (pDCTstat->LogicalCPUID & (AMD_DR_B2 | AMD_DR_B3)) {
3670 mct_Wait(10000); /* Wait 50 us*/
3671 val = Get_NB32(dev, 0x110);
3672 if (!(val & (1 << DramEnabled))) {
3673 /* If 50 us expires while DramEnable =0 then do the following */
3674 val = Get_NB32(dev, 0x90 + reg_off);
3675 val &= ~(1 << Width128); /* Program Width128 = 0 */
3676 Set_NB32(dev, 0x90 + reg_off, val);
3678 val = Get_NB32_index_wait(dev, 0x98 + reg_off, 0x05); /* Perform dummy CSR read to F2x09C_x05 */
3680 if (pDCTstat->GangedMode) {
3681 val = Get_NB32(dev, 0x90 + reg_off);
3682 val |= 1 << Width128; /* Program Width128 = 1 (restore ganged width) */
3683 Set_NB32(dev, 0x90 + reg_off, val);
3689 /* ==========================================================
3690 * 6-bit Bank Addressing Table
3693 * CCC=Columns-9 binary
3694 * ==========================================================
3695 * DCT CCCBRR Rows Banks Columns 64-bit CS Size
3697 * 0000 000000 13 2 9 128MB
3698 * 0001 001000 13 2 10 256MB
3699 * 0010 001001 14 2 10 512MB
3700 * 0011 010000 13 2 11 512MB
3701 * 0100 001100 13 3 10 512MB
3702 * 0101 001101 14 3 10 1GB
3703 * 0110 010001 14 2 11 1GB
3704 * 0111 001110 15 3 10 2GB
3705 * 1000 010101 14 3 11 2GB
3706 * 1001 010110 15 3 11 4GB
3707 * 1010 001111 16 3 10 4GB
3708 * 1011 010111 16 3 11 8GB
/* Verify the DDR3 SPD CRC for the DIMM at 'smbaddr': recompute CRC-16
 * over the covered bytes (count from SPD byte 0 bit 7: 117 or 126 bytes)
 * and compare against the stored checksum in SPD bytes 126/127.
 * NOTE(review): the byte-count assignments and the shift/XOR inner loop
 * are elided in this listing. */
3710 u8 crcCheck(u8 smbaddr)
3717 byte_use = mctRead_SPD(smbaddr, SPD_ByteUse);
3718 if (byte_use & 0x80)
3724 for (Index = 0; Index < byte_use; Index ++) {
3725 byte = mctRead_SPD(smbaddr, Index);
3727 for (i=0; i<8; i++) {
3735 return CRC == (mctRead_SPD(smbaddr, SPD_byte_127) << 8 | mctRead_SPD(smbaddr, SPD_byte_126));