/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
/* Description: Main memory controller system configuration for DDR 2 */

/* KNOWN ISSUES - ERRATA
 *
 * Trtp is not calculated correctly when the controller is in 64-bit mode;
 * it is 1 busclock off. No fix planned. The controller is not ordinarily
 * in 64-bit mode.
 *
 * 32 Byte burst not supported. No fix planned. The controller is not
 * ordinarily in 64-bit mode.
 *
 * Trc precision does not use the extra JEDEC-defined fractional component.
 * Instead, Trc (coarse) is rounded up to the nearest 1 ns.
 *
 * Mini and Micro DIMM not supported. Only RDIMM, UDIMM, SO-DIMM defined
 * types are supported.
 */
39 static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
40 struct DCTStatStruc *pDCTstatA);
41 static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
42 struct DCTStatStruc *pDCTstatA);
43 static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
44 struct DCTStatStruc *pDCTstatA);
45 static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
46 struct DCTStatStruc *pDCTstatA);
47 static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
48 struct DCTStatStruc *pDCTstatA);
49 static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
50 struct DCTStatStruc *pDCTstat);
51 static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
52 struct DCTStatStruc *pDCTstat);
53 static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
54 struct DCTStatStruc *pDCTstatA);
55 static u8 NodePresent_D(u8 Node);
56 static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
57 struct DCTStatStruc *pDCTstatA);
58 static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
59 struct DCTStatStruc *pDCTstat, u8 dct);
60 static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
61 struct DCTStatStruc *pDCTstat, u8 dct);
62 static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
63 struct DCTStatStruc *pDCTstat, u8 dct);
64 static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
65 struct DCTStatStruc *pDCTstat);
66 static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
67 struct DCTStatStruc *pDCTstat, u8 dct);
68 static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
69 struct DCTStatStruc *pDCTstat, u8 dct);
70 static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
71 struct DCTStatStruc *pDCTstat, u8 dct);
72 static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
73 struct DCTStatStruc *pDCTstat, u8 dct);
74 static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
75 struct DCTStatStruc *pDCTstat, u8 dct);
76 static u8 Get_DefTrc_k_D(u8 k);
77 static u16 Get_40Tk_D(u8 k);
78 static u16 Get_Fk_D(u8 k);
79 static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat, u8 i, u8 j, u8 k);
80 static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
81 struct DCTStatStruc *pDCTstat, int j, int k);
82 static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i);
83 static void mct_initDCT(struct MCTStatStruc *pMCTstat,
84 struct DCTStatStruc *pDCTstat);
85 static void mct_DramInit(struct MCTStatStruc *pMCTstat,
86 struct DCTStatStruc *pDCTstat, u8 dct);
87 static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
88 struct DCTStatStruc *pDCTstat, u8 dct);
89 static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat);
90 static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
91 struct DCTStatStruc *pDCTstat, u8 dct);
92 static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
93 struct DCTStatStruc *pDCTstat, u8 dct);
94 static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,\
95 struct DCTStatStruc *pDCTstat, u8 dct);
96 static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
97 struct DCTStatStruc *pDCTstat, u8 dct);
98 static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
99 struct DCTStatStruc *pDCTstat, u8 dct);
100 static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
101 struct DCTStatStruc *pDCTstat, u8 dct);
102 static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
103 struct DCTStatStruc *pDCTstat, u8 dct);
104 static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
105 struct DCTStatStruc *pDCTstat, u8 dct);
106 static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
107 struct DCTStatStruc *pDCTstat, u8 dct);
108 static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
109 struct DCTStatStruc *pDCTstat);
110 static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
111 u32 dev, u32 index_reg, u32 index);
112 static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
113 u32 dev, u32 index_reg);
114 static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
115 u32 dev, u32 index_reg);
116 static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
117 u32 dev, u32 index_reg, u32 index);
118 static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
119 struct DCTStatStruc *pDCTstat);
120 static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat, u8 dct,
121 u32 dev, u32 index_reg, u32 index);
122 static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat,
123 struct DCTStatStruc *pDCTstat);
124 static void mct_init(struct MCTStatStruc *pMCTstat,
125 struct DCTStatStruc *pDCTstat);
126 static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
127 struct DCTStatStruc *pDCTstat);
128 static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
129 struct DCTStatStruc *pDCTstatA);
130 static void SetCSTriState(struct MCTStatStruc *pMCTstat,
131 struct DCTStatStruc *pDCTstat, u8 dct);
132 static void SetODTTriState(struct MCTStatStruc *pMCTstat,
133 struct DCTStatStruc *pDCTstat, u8 dct);
134 static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
135 struct DCTStatStruc *pDCTstat, u8 dct);
136 static u32 mct_NodePresent_D(void);
137 static void WaitRoutine_D(u32 time);
138 static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
139 struct DCTStatStruc *pDCTstatA);
140 static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
141 struct DCTStatStruc *pDCTstatA);
142 static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
143 struct DCTStatStruc *pDCTstat);
144 static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
145 struct DCTStatStruc *pDCTstat);
146 void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
147 struct DCTStatStruc *pDCTstat);
148 static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
149 struct DCTStatStruc *pDCTstat);
150 void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
151 struct DCTStatStruc *pDCTstat);
152 static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
153 struct DCTStatStruc *pDCTstatA);
154 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct);
155 static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
156 struct DCTStatStruc *pDCTstat, u8 dct);
159 /*See mctAutoInitMCT header for index relationships to CL and T*/
160 static const u16 Table_F_k[] = {00,200,266,333,400,533 };
161 static const u8 Table_T_k[] = {0x00,0x50,0x3D,0x30,0x25, 0x18 };
162 static const u8 Table_CL2_j[] = {0x04,0x08,0x10,0x20,0x40, 0x80 };
163 static const u8 Tab_defTrc_k[] = {0x0,0x41,0x3C,0x3C,0x3A, 0x3A };
164 static const u16 Tab_40T_k[] = {00,200,150,120,100,75 };
165 static const u8 Tab_TrefT_k[] = {00,0,1,1,2,2,3,4,5,6,0,0};
166 static const u8 Tab_BankAddr[] = {0x0,0x08,0x09,0x10,0x0C,0x0D,0x11,0x0E,0x15,0x16,0x0F,0x17};
167 static const u8 Tab_tCL_j[] = {0,2,3,4,5};
168 static const u8 Tab_1KTfawT_k[] = {00,8,10,13,14,20};
169 static const u8 Tab_2KTfawT_k[] = {00,10,14,17,18,24};
170 static const u8 Tab_L1CLKDis[] = {8,8,6,4,2,0,8,8};
171 static const u8 Tab_M2CLKDis[] = {2,0,8,8,2,0,2,0};
172 static const u8 Tab_S1CLKDis[] = {8,0,8,8,8,0,8,0};
173 static const u8 Table_Comp_Rise_Slew_20x[] = {7, 3, 2, 2, 0xFF};
174 static const u8 Table_Comp_Rise_Slew_15x[] = {7, 7, 3, 2, 0xFF};
175 static const u8 Table_Comp_Fall_Slew_20x[] = {7, 5, 3, 2, 0xFF};
176 static const u8 Table_Comp_Fall_Slew_15x[] = {7, 7, 5, 3, 0xFF};
/*
 * mctAutoInitMCT_D: top-level automatic DRAM initialization entry point.
 * Visible flow: per-node discovery and DCT init (mct_init, presence
 * check, clear_legacy_Mode, mct_InitialMCT_D, SMBus crossbar select,
 * mct_initDCT), then system-wide steps: DCT-ready sync, HT memory map,
 * CPU/UMA memory typing, DQS training, other timings, DIMM-spare
 * reconfigure, node/channel interleave, ECC setup + memory clear, and
 * mct_FinalMCT_D on node 0.  Fatal errors jump to the die() at the end.
 * NOTE(review): this chunk is missing lines (the embedded original line
 * numbers skip values); braces, locals and several statements are not
 * visible here — treat as a partial view.
 */
178 static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
179 struct DCTStatStruc *pDCTstatA)
182 * Memory may be mapped contiguously all the way up to 4GB (depending
183 * on setup options). It is the responsibility of PCI subsystem to
184 * create an uncacheable IO region below 4GB and to adjust TOP_MEM
185 * downward prior to any IO mapping or accesses. It is the same
186 * responsibility of the CPU sub-system prior toaccessing LAPIC.
188 * Slot Number is an external convention, and is determined by OEM with
189 * accompanying silk screening. OEM may choose to use Slot number
190 * convention which is consistent with DIMM number conventions.
191 * All AMD engineering
194 * Run-Time Requirements:
195 * 1. Complete Hypertransport Bus Configuration
196 * 2. SMBus Controller Initialized
197 * 3. Checksummed or Valid NVRAM bits
198 * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
199 * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to
201 * 6. All var MTRRs reset to zero
202 * 7. State of NB_CFG.DisDatMsk set properly on all CPUs
203 * 8. All CPUs at 2Ghz Speed (unless DQS training is not installed).
204 * 9. All cHT links at max Speed/Width (unless DQS training is not
208 * Global relationship between index values and item values:
210 * --------------------------
222 mctInitMemGPIOs_A_D(); /* Set any required GPIOs*/
/* Stage 1: per-node setup — record PCI device handles, probe node
 * presence, and initialize each present node's DCTs. */
225 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
226 struct DCTStatStruc *pDCTstat;
227 pDCTstat = pDCTstatA + Node;
228 pDCTstat->Node_ID = Node;
229 pDCTstat->dev_host = PA_HOST(Node);
230 pDCTstat->dev_map = PA_MAP(Node);
231 pDCTstat->dev_dct = PA_DCT(Node);
232 pDCTstat->dev_nbmisc = PA_NBMISC(Node);
233 pDCTstat->NodeSysBase = node_sys_base;
235 print_tx("mctAutoInitMCT_D: mct_init Node ", Node);
236 mct_init(pMCTstat, pDCTstat);
237 mctNodeIDDebugPort_D();
238 pDCTstat->NodePresent = NodePresent_D(Node);
239 if (pDCTstat->NodePresent) { /* See if Node is there*/
240 print_t("mctAutoInitMCT_D: clear_legacy_Mode\n");
241 clear_legacy_Mode(pMCTstat, pDCTstat);
242 pDCTstat->LogicalCPUID = mctGetLogicalCPUID_D(Node);
244 print_t("mctAutoInitMCT_D: mct_InitialMCT_D\n");
245 mct_InitialMCT_D(pMCTstat, pDCTstat);
247 print_t("mctAutoInitMCT_D: mctSMBhub_Init\n");
248 mctSMBhub_Init(Node); /* Switch SMBUS crossbar to proper node*/
250 print_t("mctAutoInitMCT_D: mct_initDCT\n");
251 mct_initDCT(pMCTstat, pDCTstat);
252 if (pDCTstat->ErrCode == SC_FatalErr) {
253 goto fatalexit; /* any fatal errors?*/
254 } else if (pDCTstat->ErrCode < SC_StopError) {
257 } /* if Node present */
/* Carry this node's limit forward as the next node's base.
 * NOTE(review): values appear to be right-justified-by-8 ([39:8])
 * addresses, as in HTMemMapInit_D — confirm units. */
258 node_sys_base = pDCTstat->NodeSysBase;
259 node_sys_base += (pDCTstat->NodeSysLimit + 2) & ~0x0F;
261 if (NodesWmem == 0) {
262 print_debug("No Nodes?!\n");
/* Stage 2: system-wide sequencing across all present nodes. */
266 print_t("mctAutoInitMCT_D: SyncDCTsReady_D\n");
267 SyncDCTsReady_D(pMCTstat, pDCTstatA); /* Make sure DCTs are ready for accesses.*/
269 print_t("mctAutoInitMCT_D: HTMemMapInit_D\n");
270 HTMemMapInit_D(pMCTstat, pDCTstatA); /* Map local memory into system address space.*/
273 print_t("mctAutoInitMCT_D: CPUMemTyping_D\n");
274 CPUMemTyping_D(pMCTstat, pDCTstatA); /* Map dram into WB/UC CPU cacheability */
275 mctHookAfterCPU(); /* Setup external northbridge(s) */
277 print_t("mctAutoInitMCT_D: DQSTiming_D\n");
278 DQSTiming_D(pMCTstat, pDCTstatA); /* Get Receiver Enable and DQS signal timing*/
280 print_t("mctAutoInitMCT_D: UMAMemTyping_D\n");
281 UMAMemTyping_D(pMCTstat, pDCTstatA); /* Fix up for UMA sizing */
283 print_t("mctAutoInitMCT_D: :OtherTiming\n");
284 mct_OtherTiming(pMCTstat, pDCTstatA);
286 if (ReconfigureDIMMspare_D(pMCTstat, pDCTstatA)) { /* RESET# if 1st pass of DIMM spare enabled*/
290 InterleaveNodes_D(pMCTstat, pDCTstatA);
291 InterleaveChannels_D(pMCTstat, pDCTstatA);
293 print_t("mctAutoInitMCT_D: ECCInit_D\n");
294 if (ECCInit_D(pMCTstat, pDCTstatA)) { /* Setup ECC control and ECC check-bits*/
295 print_t("mctAutoInitMCT_D: MCTMemClr_D\n");
296 MCTMemClr_D(pMCTstat,pDCTstatA);
299 mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) ); // Node 0
300 print_t("All Done\n");
/* fatalexit label target (label line not visible in this chunk). */
304 die("mct_d: fatalexit");
/*
 * ReconfigureDIMMspare_D: handle the two-pass flow for DIMM-spare setup.
 * Only active when the NV_CS_SpareCTL NVRAM bit is set.  In the
 * MCT_DIMM_SPARE_NO_WARM (no warm-reset) variant: on the second pass
 * (GSB_EnDIMMSpareNW already set) the saved DQS signal timings are
 * reloaded; on the first pass the data structures are reset and the
 * flag is latched so initialization runs again.  The warm-reset variant
 * branches on NV_DQSTrainCTL.
 * Returns u8 — caller treats nonzero as "RESET# required".
 * NOTE(review): return statements and several braces are missing from
 * this chunk (embedded line numbers skip values).
 */
308 static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
309 struct DCTStatStruc *pDCTstatA)
313 if (mctGet_NVbits(NV_CS_SpareCTL)) {
314 if (MCT_DIMM_SPARE_NO_WARM) {
315 /* Do no warm-reset DIMM spare */
316 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
317 LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);
/* First pass: wipe state and mark spare-no-warm so init reruns. */
320 mct_ResetDataStruct_D(pMCTstat, pDCTstatA);
321 pMCTstat->GStatus |= 1 << GSB_EnDIMMSpareNW;
325 /* Do warm-reset DIMM spare */
326 if (mctGet_NVbits(NV_DQSTrainCTL))
/*
 * DQSTiming_D: receiver-enable and DQS-position training for all nodes.
 * Skipped early when the DIMM-spare no-warm-reset flow is in progress
 * (GSB_EnDIMMSpareNW).  When NV_DQSTrainCTL is set: fence training,
 * receiver-enable training (FirstPass only — SecondPass is explicitly
 * unused for Barcelona, see commented-out call), DQS position training,
 * ECC DQS receiver enables, then end-training cleanup and memory clear.
 * When NV_DQSTrainCTL is clear: restore previously saved timing values
 * and load them into the registers, then memory clear.
 * NOTE(review): lines are missing from this chunk (embedded line
 * numbers skip values); braces and locals are incomplete in this view.
 */
340 static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
341 struct DCTStatStruc *pDCTstatA)
345 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
348 nv_DQSTrainCTL = mctGet_NVbits(NV_DQSTrainCTL);
349 /* FIXME: BOZO- DQS training every time*/
352 print_t("DQSTiming_D: mct_BeforeDQSTrain_D:\n");
353 mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);
354 phyAssistedMemFnceTraining(pMCTstat, pDCTstatA);
356 if (nv_DQSTrainCTL) {
357 mctHookBeforeAnyTraining(pMCTstat, pDCTstatA);
359 print_t("DQSTiming_D: TrainReceiverEn_D FirstPass:\n");
360 TrainReceiverEn_D(pMCTstat, pDCTstatA, FirstPass);
362 print_t("DQSTiming_D: mct_TrainDQSPos_D\n");
363 mct_TrainDQSPos_D(pMCTstat, pDCTstatA);
365 // Second Pass never used for Barcelona!
366 //print_t("DQSTiming_D: TrainReceiverEn_D SecondPass:\n");
367 //TrainReceiverEn_D(pMCTstat, pDCTstatA, SecondPass);
369 print_t("DQSTiming_D: mctSetEccDQSRcvrEn_D\n");
370 mctSetEccDQSRcvrEn_D(pMCTstat, pDCTstatA);
372 print_t("DQSTiming_D: TrainMaxReadLatency_D\n");
373 //FIXME - currently uses calculated value TrainMaxReadLatency_D(pMCTstat, pDCTstatA);
374 mctHookAfterAnyTraining();
375 mctSaveDQSSigTmg_D();
377 print_t("DQSTiming_D: mct_EndDQSTraining_D\n");
378 mct_EndDQSTraining_D(pMCTstat, pDCTstatA);
380 print_t("DQSTiming_D: MCTMemClr_D\n");
381 MCTMemClr_D(pMCTstat, pDCTstatA);
/* No training requested: reuse saved timings instead. */
383 mctGetDQSSigTmg_D(); /* get values into data structure */
384 LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA); /* load values into registers.*/
385 //mctDoWarmResetMemClr_D();
386 MCTMemClr_D(pMCTstat, pDCTstatA);
/*
 * LoadDQSSigTmgRegs_D: program previously-determined DQS signal timing
 * values into the DCT index registers for every node with memory
 * (DCTSysLimit != 0).  Per channel: receiver-enable delays for the four
 * receiver pairs, ECC DQS receiver enables, per-DIMM read/write data
 * timing (via the F2x[1,0]9C index register at 0x98 + 0x100*channel),
 * and finally MaxRdLatency in F2x[1,0]78 with DqsRcvEnTrain cleared.
 * NOTE(review): lines are missing from this chunk (embedded line
 * numbers skip values); loop braces and some arguments are not visible.
 */
391 static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
392 struct DCTStatStruc *pDCTstatA)
394 u8 Node, Receiver, Channel, Dir, DIMM;
402 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
403 struct DCTStatStruc *pDCTstat;
404 pDCTstat = pDCTstatA + Node;
/* Only nodes that actually have mapped memory. */
406 if (pDCTstat->DCTSysLimit) {
407 dev = pDCTstat->dev_dct;
408 for (Channel = 0;Channel < 2; Channel++) {
409 /* there are four receiver pairs,
410 loosely associated with chipselects.*/
411 index_reg = 0x98 + Channel * 0x100;
412 for (Receiver = 0; Receiver < 8; Receiver += 2) {
413 /* Set Receiver Enable Values */
414 mct_SetRcvrEnDly_D(pDCTstat,
416 1, /* FinalValue, From stack */
420 (Receiver >> 1) * 3 + 0x10, /* Addl_Index */
421 2); /* Pass Second Pass ? */
425 for (Channel = 0; Channel<2; Channel++) {
426 SetEccDQSRcvrEn_D(pDCTstat, Channel);
429 for (Channel = 0; Channel < 2; Channel++) {
431 index_reg = 0x98 + Channel * 0x100;
434 * when 400, 533, 667, it will support dimm0/1/2/3,
435 * and set conf for dimm0, hw will copy to dimm1/2/3
436 * set for dimm1, hw will copy to dimm3
437 * Rev A/B only support DIMM0/1 when 800Mhz and above
438 * + 0x100 to next dimm
439 * Rev C support DIMM0/1/2/3 when 800Mhz and above
440 * + 0x100 to next dimm
442 for (DIMM = 0; DIMM < 2; DIMM++) {
444 index = 0; /* CHA Write Data Timing Low */
/* Speed >= 4 (800MHz+): each DIMM gets its own 0x100 index window. */
446 if (pDCTstat->Speed >= 4) {
447 index = 0x100 * DIMM;
452 for (Dir=0;Dir<2;Dir++) {//RD/WR
453 p = pDCTstat->CH_D_DIR_B_DQS[Channel][DIMM][Dir];
454 val = stream_to_int(p); /* CHA Read Data Timing High */
455 Set_NB32_index_wait(dev, index_reg, index+1, val);
456 val = stream_to_int(p+4); /* CHA Write Data Timing High */
457 Set_NB32_index_wait(dev, index_reg, index+2, val);
458 val = *(p+8); /* CHA Write ECC Timing */
459 Set_NB32_index_wait(dev, index_reg, index+3, val);
/* Program MaxRdLatency (bits 31:22 of F2x[1,0]78) and clear the
 * DqsRcvEnTrain mode bit now that training values are final. */
465 for (Channel = 0; Channel<2; Channel++) {
466 reg = 0x78 + Channel * 0x100;
467 val = Get_NB32(dev, reg);
469 val |= ((u32) pDCTstat->CH_MaxRdLat[Channel] << 22);
470 val &= ~(1<<DqsRcvEnTrain);
471 Set_NB32(dev, reg, val); /* program MaxRdLatency to correspond with current delay*/
/*
 * ResetNBECCstat_D: clear MC4_STS (northbridge machine-check status)
 * on every present node by zeroing the MCA NB Status Low/High aliases
 * at NB-misc registers 0x48/0x4C.
 */
478 static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
479 struct DCTStatStruc *pDCTstatA);
480 static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
481 struct DCTStatStruc *pDCTstatA)
483 /* Clear MC4_STS for all Nodes in the system. This is required in some
484 * circumstances to clear left over garbage from cold reset, shutdown,
485 * or normal ECC memory conditioning.
488 //FIXME: this function depends on pDCTstat Array ( with Node id ) - Is this really a problem?
493 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
494 struct DCTStatStruc *pDCTstat;
495 pDCTstat = pDCTstatA + Node;
497 if (pDCTstat->NodePresent) {
498 dev = pDCTstat->dev_nbmisc;
499 /*MCA NB Status Low (alias to MC4_STS[31:0] */
500 Set_NB32(dev, 0x48, 0);
501 /* MCA NB Status High (alias to MC4_STS[63:32] */
502 Set_NB32(dev, 0x4C, 0);
/*
 * HTMemMapInit_D: build the HyperTransport DRAM address map.
 * Walks every node, assigning each a contiguous [base, limit] range in
 * the system address space (addresses are right-justified by 8 bits,
 * i.e. [39:8] — see _4GB_RJ8).  If the memory-hole option is enabled
 * and a node's range straddles BottomIO, programs the DRAM Hole
 * Address Register (F1xF0) for hardware hoisting; a range starting
 * exactly at BottomIO gets the software-hole treatment instead.
 * Programs DRAM Base/Limit pairs (F1x40/F1x44 + Node*8) on node 0,
 * then copies the whole map (0x40..0x7C) to every other present node
 * and to the extension registers via mct_HTMemMapExt.
 * NOTE(review): lines are missing from this chunk (embedded line
 * numbers skip values); several braces/else-arms are not visible.
 */
508 static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
509 struct DCTStatStruc *pDCTstatA)
512 u32 NextBase, BottomIO;
513 u8 _MemHoleRemap, DramHoleBase, DramHoleOffset;
514 u32 HoleSize, DramSelBaseAddr;
520 struct DCTStatStruc *pDCTstat;
522 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
/* Hole base: NVRAM BottomIO setting on first call, else reuse the
 * value already recorded in pMCTstat (units: [31:24] of address). */
524 if (pMCTstat->HoleBase == 0) {
525 DramHoleBase = mctGet_NVbits(NV_BottomIO);
527 DramHoleBase = pMCTstat->HoleBase >> (24-8);
530 BottomIO = DramHoleBase << (24-8);
533 pDCTstat = pDCTstatA + 0;
534 dev = pDCTstat->dev_map;
537 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
538 pDCTstat = pDCTstatA + Node;
539 devx = pDCTstat->dev_map;
541 pDCTstat = pDCTstatA + Node;
542 if (!pDCTstat->GangedMode) {
543 DramSelBaseAddr = pDCTstat->NodeSysLimit - pDCTstat->DCTSysLimit;
544 /*In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
545 val = pDCTstat->NodeSysLimit;
546 if ((val & 0xFF) == 0xFE) {
550 pDCTstat->DCTSysLimit = val;
553 base = pDCTstat->DCTSysBase;
554 limit = pDCTstat->DCTSysLimit;
558 DramSelBaseAddr += NextBase;
559 printk(BIOS_DEBUG, " Node: %02x base: %02x limit: %02x BottomIO: %02x\n", Node, base, limit, BottomIO);
/* Node range straddles the IO hole: hoist with the HW hole. */
562 if ((base < BottomIO) && (limit >= BottomIO)) {
564 pDCTstat->Status |= 1 << SB_HWHole;
565 pMCTstat->GStatus |= 1 << GSB_HWHole;
566 pDCTstat->DCTSysBase = base;
567 pDCTstat->DCTSysLimit = limit;
568 pDCTstat->DCTHoleBase = BottomIO;
569 pMCTstat->HoleBase = BottomIO;
570 HoleSize = _4GB_RJ8 - BottomIO; /* HoleSize[39:8] */
571 if ((DramSelBaseAddr > 0) && (DramSelBaseAddr < BottomIO))
572 base = DramSelBaseAddr;
573 val = ((base + HoleSize) >> (24-8)) & 0xFF;
574 DramHoleOffset = val;
575 val <<= 8; /* shl 16, rol 24 */
576 val |= DramHoleBase << 24;
577 val |= 1 << DramHoleValid;
578 Set_NB32(devx, 0xF0, val); /* Dram Hole Address Reg */
579 pDCTstat->DCTSysLimit += HoleSize;
580 base = pDCTstat->DCTSysBase;
581 limit = pDCTstat->DCTSysLimit;
582 } else if (base == BottomIO) {
/* Node starts exactly at the hole: use the software node hole. */
584 pMCTstat->GStatus |= 1<<GSB_SpIntRemapHole;
585 pDCTstat->Status |= 1<<SB_SWNodeHole;
586 pMCTstat->GStatus |= 1<<GSB_SoftHole;
587 pMCTstat->HoleBase = base;
591 pDCTstat->DCTSysBase = base;
592 pDCTstat->DCTSysLimit = limit;
594 /* No Remapping. Normal Contiguous mapping */
595 pDCTstat->DCTSysBase = base;
596 pDCTstat->DCTSysLimit = limit;
599 /*No Remapping. Normal Contiguous mapping*/
600 pDCTstat->DCTSysBase = base;
601 pDCTstat->DCTSysLimit = limit;
603 base |= 3; /* set WE,RE fields*/
604 pMCTstat->SysLimit = limit;
606 Set_NB32(dev, 0x40 + (Node << 3), base); /* [Node] + Dram Base 0 */
608 /* if Node limit > 1GB then set it to 1GB boundary for each node */
609 if ((mctSetNodeBoundary_D()) && (limit > 0x00400000)) {
614 val = limit & 0xFFFF0000;
616 Set_NB32(dev, 0x44 + (Node << 3), val); /* set DstNode */
618 limit = pDCTstat->DCTSysLimit;
/* Next node's base is this node's limit rounded up to 16MB. */
620 NextBase = (limit & 0xFFFF0000) + 0x10000;
621 if ((mctSetNodeBoundary_D()) && (NextBase > 0x00400000)) {
623 NextBase &= 0xFFC00000;
629 /* Copy dram map from Node 0 to Node 1-7 */
630 for (Node = 1; Node < MAX_NODES_SUPPORTED; Node++) {
632 pDCTstat = pDCTstatA + Node;
633 devx = pDCTstat->dev_map;
635 if (pDCTstat->NodePresent) {
636 printk(BIOS_DEBUG, " Copy dram map from Node 0 to Node %02x \n", Node);
637 reg = 0x40; /*Dram Base 0*/
639 val = Get_NB32(dev, reg);
640 Set_NB32(devx, reg, val);
642 } while ( reg < 0x80);
644 break; /* stop at first absent Node */
648 /*Copy dram map to F1x120/124*/
649 mct_HTMemMapExt(pMCTstat, pDCTstatA);
/*
 * MCTMemClr_D: start the hardware memory-clear engine on every present
 * node (first loop — the clears run in parallel), then wait for each
 * node's engine to finish (second loop).  Only runs the HW path when
 * NV_DQSTrainCTL is set; otherwise the warm-reset wrapper callback is
 * expected to handle it (FIXME left in place).
 */
653 static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
654 struct DCTStatStruc *pDCTstatA)
657 /* Initiates a memory clear operation for all node. The mem clr
658 * is done in paralel. After the memclr is complete, all processors
659 * status are checked to ensure that memclr has completed.
662 struct DCTStatStruc *pDCTstat;
664 if (!mctGet_NVbits(NV_DQSTrainCTL)){
665 // FIXME: callback to wrapper: mctDoWarmResetMemClr_D
666 } else { // NV_DQSTrainCTL == 1
/* Kick off the clear on every present node before waiting on any. */
667 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
668 pDCTstat = pDCTstatA + Node;
670 if (pDCTstat->NodePresent) {
671 DCTMemClr_Init_D(pMCTstat, pDCTstat);
/* Now block until each node reports completion. */
674 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
675 pDCTstat = pDCTstatA + Node;
677 if (pDCTstat->NodePresent) {
678 DCTMemClr_Sync_D(pMCTstat, pDCTstat);
/*
 * DCTMemClr_Init_D: start the memory-clear engine on one node.
 * Skips nodes without mapped memory (DCTSysLimit == 0); waits for any
 * in-flight clear (MemClrBusy) to drain, then sets MemClrInit.
 */
685 static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
686 struct DCTStatStruc *pDCTstat)
692 /* Initiates a memory clear operation on one node */
693 if (pDCTstat->DCTSysLimit) {
694 dev = pDCTstat->dev_dct;
/* Spin until a previous clear, if any, is no longer busy. */
698 val = Get_NB32(dev, reg);
699 } while (val & (1 << MemClrBusy));
701 val |= (1 << MemClrInit);
702 Set_NB32(dev, reg, val);
/*
 * MCTMemClrSync_D: wait until the memory-clear engine has completed on
 * every present node.  Mirrors the wait half of MCTMemClr_D; only runs
 * when NV_DQSTrainCTL is set (warm-reset wrapper handles the other
 * case).
 */
708 static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
709 struct DCTStatStruc *pDCTstatA)
711 /* Ensures that memory clear has completed on all node.*/
713 struct DCTStatStruc *pDCTstat;
715 if (!mctGet_NVbits(NV_DQSTrainCTL)){
716 // callback to wrapper: mctDoWarmResetMemClr_D
717 } else { // NV_DQSTrainCTL == 1
718 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
719 pDCTstat = pDCTstatA + Node;
721 if (pDCTstat->NodePresent) {
722 DCTMemClr_Sync_D(pMCTstat, pDCTstat);
/*
 * DCTMemClr_Sync_D: block until the memory-clear engine on one node is
 * done (MemClrBusy deasserted, then Dr_MemClrStatus asserted), then
 * program F2x11C with the BKDG-recommended value plus
 * MCCH_FlushWrOnStpGnt for S3 support.
 */
729 static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
730 struct DCTStatStruc *pDCTstat)
733 u32 dev = pDCTstat->dev_dct;
736 /* Ensure that a memory clear operation has completed on one node */
737 if (pDCTstat->DCTSysLimit){
/* First wait for busy to clear... */
741 val = Get_NB32(dev, reg);
742 } while (val & (1 << MemClrBusy));
/* ...then wait for the completion status bit to latch. */
745 val = Get_NB32(dev, reg);
746 } while (!(val & (1 << Dr_MemClrStatus)));
749 val = 0x0FE40FC0; // BKDG recommended
750 val |= MCCH_FlushWrOnStpGnt; // Set for S3
751 Set_NB32(dev, 0x11C, val);
/*
 * NodePresent_D: probe whether a Hammer-family node exists at the
 * given node number.  Reads the device/vendor ID at the node's host
 * bridge and compares against the expected ID from mct_NodePresent_D();
 * the OEM hook may override the result.  Also cross-checks the Node ID
 * register (F0x60).  Returns u8 presence flag (return paths not
 * visible in this chunk — embedded line numbers skip values).
 */
755 static u8 NodePresent_D(u8 Node)
758 * Determine if a single Hammer Node exists within the network.
766 dev = PA_HOST(Node); /*test device/vendor id at host bridge */
767 val = Get_NB32(dev, 0);
768 dword = mct_NodePresent_D(); /* FIXME: BOZO -11001022h rev for F */
769 if (val == dword) { /* AMD Hammer Family CPU HT Configuration */
770 if (oemNodePresent_D(Node, &ret))
772 /* Node ID register */
773 val = Get_NB32(dev, 0x60);
776 if (val == dword) /* current nodeID = requested nodeID ? */
/*
 * DCTInit_D: initialize DRAM on a single node's DCT `dct`.
 * Pipeline (each stage runs only while ErrCode stays below
 * SC_StopError): clear registers -> DIMM presence -> SPD width calc ->
 * auto cycle timing -> auto config -> platform-specific config ->
 * StartupDCT_D (skipped during the DIMM-spare no-warm second pass).
 * On the failure path the DRAM interface is disabled
 * (DisDramInterface in F2x[1,0]94) and the MemClkDis bits are set in
 * F2x[1,0]88 for maximum power savings.
 * NOTE(review): braces/else-arms are missing from this chunk (embedded
 * line numbers skip values).
 */
786 static void DCTInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct)
789 * Initialize DRAM on single Athlon 64/Opteron Node.
795 ClearDCT_D(pMCTstat, pDCTstat, dct);
796 stopDCTflag = 1; /*preload flag with 'disable' */
797 if (mct_DIMMPresence(pMCTstat, pDCTstat, dct) < SC_StopError) {
798 print_t("\t\tDCTInit_D: mct_DIMMPresence Done\n");
799 if (mct_SPDCalcWidth(pMCTstat, pDCTstat, dct) < SC_StopError) {
800 print_t("\t\tDCTInit_D: mct_SPDCalcWidth Done\n");
801 if (AutoCycTiming_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
802 print_t("\t\tDCTInit_D: AutoCycTiming_D Done\n");
803 if (AutoConfig_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
804 print_t("\t\tDCTInit_D: AutoConfig_D Done\n");
805 if (PlatformSpec_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
806 print_t("\t\tDCTInit_D: PlatformSpec_D Done\n");
808 if (!(pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW))) {
809 print_t("\t\tDCTInit_D: StartupDCT_D\n");
810 StartupDCT_D(pMCTstat, pDCTstat, dct); /*yeaahhh! */
/* Failure path: shut the DRAM interface down on this DCT. */
818 u32 reg_off = dct * 0x100;
819 val = 1<<DisDramInterface;
820 Set_NB32(pDCTstat->dev_dct, reg_off+0x94, val);
821 /*To maximize power savings when DisDramInterface=1b,
822 all of the MemClkDis bits should also be set.*/
824 Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
/*
 * SyncDCTsReady_D: iterate over all node slots and delegate to
 * mct_SyncDCTsReady for each, which blocks until that node's DCTs are
 * ready for DRAM accesses.
 */
829 static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
830 struct DCTStatStruc *pDCTstatA)
832 /* Wait (and block further access to dram) for all DCTs to be ready,
833 * by polling all InitDram bits and waiting for possible memory clear
834 * operations to be complete. Read MemClkFreqVal bit to see if
835 * the DIMMs are present in this node.
840 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
841 struct DCTStatStruc *pDCTstat;
842 pDCTstat = pDCTstatA + Node;
843 mct_SyncDCTsReady(pDCTstat);
/*
 * StartupDCT_D: kick off DRAM initialization on one DCT of one node.
 * If MemClkFreqVal (F2x[1,0]94) indicates DIMMs are present: when DQS
 * training is enabled (NV_DQSTrainCTL), first set the DqsRcvEnTrain
 * mode bit in F2x[1,0]78; then run the board hook, mct_DramInit (which
 * starts the DCT state machine and MRS sequence), AfterDramInit_D, and
 * the post-init hook.  Does not wait for completion — synchronization
 * happens in SyncDCTsReady_D.
 */
848 static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
849 struct DCTStatStruc *pDCTstat, u8 dct)
851 /* Read MemClkFreqVal bit to see if the DIMMs are present in this node.
852 * If the DIMMs are present then set the DRAM Enable bit for this node.
854 * Setting dram init starts up the DCT state machine, initializes the
855 * dram devices with MRS commands, and kicks off any
856 * HW memory clear process that the chip is capable of. The sooner
857 * that dram init is set for all nodes, the faster the memory system
858 * initialization can complete. Thus, the init loop is unrolled into
859 * two loops so as to start the processeses for non BSP nodes sooner.
860 * This procedure will not wait for the process to finish.
861 * Synchronization is handled elsewhere.
868 u32 reg_off = dct * 0x100;
870 dev = pDCTstat->dev_dct;
871 val = Get_NB32(dev, 0x94 + reg_off);
872 if (val & (1<<MemClkFreqVal)) {
873 print_t("\t\t\tStartupDCT_D: MemClkFreqVal\n");
874 byte = mctGet_NVbits(NV_DQSTrainCTL);
876 /* Enable DQSRcvEn training mode */
877 print_t("\t\t\tStartupDCT_D: DqsRcvEnTrain set \n");
878 reg = 0x78 + reg_off;
879 val = Get_NB32(dev, reg);
880 /* Setting this bit forces a 1T window with hard left
881 * pass/fail edge and a probabalistic right pass/fail
882 * edge. LEFT edge is referenced for final
883 * receiver enable position.*/
884 val |= 1 << DqsRcvEnTrain;
885 Set_NB32(dev, reg, val);
887 mctHookBeforeDramInit(); /* generalized Hook */
888 print_t("\t\t\tStartupDCT_D: DramInit \n");
889 mct_DramInit(pMCTstat, pDCTstat, dct);
890 AfterDramInit_D(pDCTstat, dct);
891 mctHookAfterDramInit(); /* generalized Hook*/
/*
 * ClearDCT_D: zero this DCT's register block (F2x40 upward).  During
 * the DIMM-spare no-warm second pass only registers up to 0x78 are
 * cleared (preserving trained timing registers); otherwise the range
 * extends to 0xA4.  The trailing writes through dev_map clear related
 * address-map registers.
 * NOTE(review): the loop increment and final register offsets are on
 * lines missing from this chunk (embedded line numbers skip values).
 */
896 static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
897 struct DCTStatStruc *pDCTstat, u8 dct)
900 u32 dev = pDCTstat->dev_dct;
901 u32 reg = 0x40 + 0x100 * dct;
904 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
905 reg_end = 0x78 + 0x100 * dct;
907 reg_end = 0xA4 + 0x100 * dct;
910 while(reg < reg_end) {
911 Set_NB32(dev, reg, val);
916 dev = pDCTstat->dev_map;
918 Set_NB32(dev, reg, val);
/*
 * AutoCycTiming_D: derive DCT cycle-timing parameters from DIMM SPD.
 * First resolves memclock speed and CAS latency (SPD, bus-load and
 * preset components), then gathers per-DIMM min/max SPD timing bytes
 * (Trp, Trrd, Trcd, Trtp, Twr, Twtr, Trc/Trfc, Tras), converts each to
 * bus clocks (Tk40 = 40 x clock period in ns; SPD values prescaled by
 * 4 divide by 40T, rounding up on remainder), clamps to the per-speed
 * min/max limits, and stores the results in pDCTstat.
 * Returns u8 error level (return not visible in this view).
 * NOTE(review): this chunk is missing lines (embedded original line
 * numbers skip values) and the function continues past the end of the
 * visible region.
 */
922 static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
923 struct DCTStatStruc *pDCTstat, u8 dct)
925 /* Initialize DCT Timing registers as per DIMM SPD.
926 * For primary timing (T, CL) use best case T value.
927 * For secondary timing params., use most aggressive settings
930 * There are three components to determining "maximum frequency":
931 * SPD component, Bus load component, and "Preset" max frequency
934 * The SPD component is a function of the min cycle time specified
935 * by each DIMM, and the interaction of cycle times from all DIMMs
936 * in conjunction with CAS latency. The SPD component only applies
937 * when user timing mode is 'Auto'.
939 * The Bus load component is a limiting factor determined by electrical
940 * characteristics on the bus as a result of varying number of device
941 * loads. The Bus load component is specific to each platform but may
942 * also be a function of other factors. The bus load component only
943 * applies when user timing mode is 'Auto'.
945 * The Preset component is subdivided into three items and is the
946 * minimum of the set: Silicon revision, user limit setting when user
947 * timing mode is 'Auto' and memclock mode is 'Limit', OEM build
948 * specification of the maximum frequency. The Preset component is only
949 * applies when user timing mode is 'Auto'.
954 u8 Trp, Trrd, Trcd, Tras, Trc, Trfc[4], Rows;
955 u32 DramTimingLo, DramTimingHi;
968 /* Get primary timing (CAS Latency and Cycle Time) */
969 if (pDCTstat->Speed == 0) {
970 mctGet_MaxLoadFreq(pDCTstat);
972 /* and Factor in presets (setup options, Si cap, etc.) */
973 GetPresetmaxF_D(pMCTstat, pDCTstat);
975 /* Go get best T and CL as specified by DIMM mfgs. and OEM */
976 SPDGetTCL_D(pMCTstat, pDCTstat, dct);
977 /* skip callback mctForce800to1067_D */
978 pDCTstat->Speed = pDCTstat->DIMMAutoSpeed;
979 pDCTstat->CASL = pDCTstat->DIMMCASL;
981 /* if "manual" memclock mode */
982 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
983 pDCTstat->Speed = mctGet_NVbits(NV_MemCkVal) + 1;
986 mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
988 /* Gather all DIMM mini-max values for cycle timing data */
998 for (i=0; i < 4; i++)
1001 for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
1003 if (pDCTstat->DIMMValid & (1 << i)) {
1004 smbaddr = Get_DIMMAddress_D(pDCTstat, dct + i);
1005 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1007 Rows = byte; /* keep track of largest row sz */
1009 byte = mctRead_SPD(smbaddr, SPD_TRP);
1013 byte = mctRead_SPD(smbaddr, SPD_TRRD);
1017 byte = mctRead_SPD(smbaddr, SPD_TRCD);
1021 byte = mctRead_SPD(smbaddr, SPD_TRTP);
1025 byte = mctRead_SPD(smbaddr, SPD_TWR);
1029 byte = mctRead_SPD(smbaddr, SPD_TWTR);
/* Trc: a 0 or 0xFF SPD byte is invalid — flag the error and fall
 * back to the per-speed default from Tab_defTrc_k. */
1033 val = mctRead_SPD(smbaddr, SPD_TRC);
1034 if ((val == 0) || (val == 0xFF)) {
1035 pDCTstat->ErrStatus |= 1<<SB_NoTrcTrfc;
1036 pDCTstat->ErrCode = SC_VarianceErr;
1037 val = Get_DefTrc_k_D(pDCTstat->Speed);
1039 byte = mctRead_SPD(smbaddr, SPD_TRCRFC);
1041 val++; /* round up in case fractional extention is non-zero.*/
1047 /* dev density=rank size/#devs per rank */
1048 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1050 val = ((byte >> 5) | (byte << 3)) & 0xFF;
1053 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE; /* dev density=2^(rows+columns+banks) */
1056 } else if (byte == 8) {
1058 } else if (byte == 16) {
1064 if (Trfc[LDIMM] < byte)
1067 byte = mctRead_SPD(smbaddr, SPD_TRAS);
1070 } /* Dimm Present */
1073 /* Convert DRAM CycleTiming values and store into DCT structure */
1075 byte = pDCTstat->Speed;
1078 Tk40 = Get_40Tk_D(byte);
1082 1. All secondary time values given in SPDs are in binary with units of ns.
1083 2. Some time values are scaled by four, in order to have least count of 0.25 ns
1084 (more accuracy). JEDEC SPD spec. shows which ones are x1 and x4.
1085 3. Internally to this SW, cycle time, Tk, is scaled by 10 to affect a
1086 least count of 0.1 ns (more accuracy).
1087 4. SPD values not scaled are multiplied by 10 and then divided by 10T to find
1088 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).
1089 5. SPD values that are prescaled by 4 are multiplied by 10 and then divided by 40T to find
1090 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).*/
1094 pDCTstat->DIMMTras = (u16)dword;
1096 if (dword % Tk40) { /* round up number of busclocks */
1100 if (val < Min_TrasT_1066)
1101 val = Min_TrasT_1066;
1102 else if (val > Max_TrasT_1066)
1103 val = Max_TrasT_1066;
1105 if (val < Min_TrasT)
1107 else if (val > Max_TrasT)
1110 pDCTstat->Tras = val;
1114 pDCTstat->DIMMTrp = dword;
1116 if (dword % Tk40) { /* round up number of busclocks */
/* NOTE(review): compares against Min_TrasT_1066 but assigns
 * Min_TrpT_1066 — looks like a copy-paste slip; verify the intended
 * bound is Min_TrpT_1066 per the BKDG. */
1120 if (val < Min_TrasT_1066)
1121 val = Min_TrpT_1066;
1122 else if (val > Max_TrpT_1066)
1123 val = Max_TrpT_1066;
1127 else if (val > Max_TrpT)
1130 pDCTstat->Trp = val;
1134 pDCTstat->DIMMTrrd = dword;
1136 if (dword % Tk40) { /* round up number of busclocks */
1140 if (val < Min_TrrdT_1066)
1141 val = Min_TrrdT_1066;
1142 else if (val > Max_TrrdT_1066)
1143 val = Max_TrrdT_1066;
1145 if (val < Min_TrrdT)
1147 else if (val > Max_TrrdT)
1150 pDCTstat->Trrd = val;
1154 pDCTstat->DIMMTrcd = dword;
1156 if (dword % Tk40) { /* round up number of busclocks */
1160 if (val < Min_TrcdT_1066)
1161 val = Min_TrcdT_1066;
1162 else if (val > Max_TrcdT_1066)
1163 val = Max_TrcdT_1066;
1165 if (val < Min_TrcdT)
1167 else if (val > Max_TrcdT)
1170 pDCTstat->Trcd = val;
1174 pDCTstat->DIMMTrc = dword;
1176 if (dword % Tk40) { /* round up number of busclocks */
1180 if (val < Min_TrcT_1066)
1181 val = Min_TrcT_1066;
1182 else if (val > Max_TrcT_1066)
1183 val = Max_TrcT_1066;
1187 else if (val > Max_TrcT)
1190 pDCTstat->Trc = val;
/* Trtp is fixed by speed class, not computed from SPD (see errata:
 * off by one busclock in 64-bit mode). */
1194 pDCTstat->DIMMTrtp = dword;
1195 val = pDCTstat->Speed;
1196 if (val <= 2) { /* 7.75ns / Speed in ns to get clock # */
1197 val = 2; /* for DDR400/DDR533 */
1198 } else { /* Note a speed of 3 will be a Trtp of 3 */
1199 val = 3; /* for DDR667/DDR800/DDR1066 */
1201 pDCTstat->Trtp = val;
1205 pDCTstat->DIMMTwr = dword;
1207 if (dword % Tk40) { /* round up number of busclocks */
1211 if (val < Min_TwrT_1066)
1212 val = Min_TwrT_1066;
1213 else if (val > Max_TwrT_1066)
1214 val = Max_TwrT_1066;
1218 else if (val > Max_TwrT)
1221 pDCTstat->Twr = val;
1225 pDCTstat->DIMMTwtr = dword;
1227 if (dword % Tk40) { /* round up number of busclocks */
/* NOTE(review): compares against Min_TwrT_1066 but assigns
 * Min_TwtrT_1066 — likely copy-paste; verify the intended bound is
 * Min_TwtrT_1066 per the BKDG. */
1231 if (val < Min_TwrT_1066)
1232 val = Min_TwtrT_1066;
1233 else if (val > Max_TwtrT_1066)
1234 val = Max_TwtrT_1066;
1236 if (val < Min_TwtrT)
1238 else if (val > Max_TwtrT)
1241 pDCTstat->Twtr = val;
1246 pDCTstat->Trfc[i] = Trfc[i];
1248 mctAdjustAutoCycTmg_D();
1250 /* Program DRAM Timing values */
1251 DramTimingLo = 0; /* Dram Timing Low init */
1252 val = pDCTstat->CASL;
1253 val = Tab_tCL_j[val];
1254 DramTimingLo |= val;
1256 val = pDCTstat->Trcd;
1258 val -= Bias_TrcdT_1066;
1262 DramTimingLo |= val<<4;
1264 val = pDCTstat->Trp;
1266 val -= Bias_TrpT_1066;
1271 DramTimingLo |= val<<7;
1273 val = pDCTstat->Trtp;
1275 DramTimingLo |= val<<11;
1277 val = pDCTstat->Tras;
1279 val -= Bias_TrasT_1066;
1282 DramTimingLo |= val<<12;
1284 val = pDCTstat->Trc;
1286 DramTimingLo |= val<<16;
1289 val = pDCTstat->Twr;
1291 DramTimingLo |= val<<20;
1294 val = pDCTstat->Trrd;
1296 val -= Bias_TrrdT_1066;
1299 DramTimingLo |= val<<22;
1302 DramTimingHi = 0; /* Dram Timing Low init */
1303 val = pDCTstat->Twtr;
1305 val -= Bias_TwtrT_1066;
1308 DramTimingHi |= val<<8;
1311 DramTimingHi |= val<<16;
1318 DramTimingHi |= val << 20;
1321 dev = pDCTstat->dev_dct;
1322 reg_off = 0x100 * dct;
1323 print_tx("AutoCycTiming: DramTimingLo ", DramTimingLo);
1324 print_tx("AutoCycTiming: DramTimingHi ", DramTimingHi);
1326 Set_NB32(dev, 0x88 + reg_off, DramTimingLo); /*DCT Timing Low*/
1327 DramTimingHi |=0x0000FC77;
1328 Set_NB32(dev, 0x8c + reg_off, DramTimingHi); /*DCT Timing Hi*/
1332 dword = pDCTstat->Twr;
1333 dword -= Bias_TwrT_1066;
1335 reg = 0x84 + reg_off;
1336 val = Get_NB32(dev, reg);
1339 Set_NB32(dev, reg, val);
1341 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1343 print_tx("AutoCycTiming: Status ", pDCTstat->Status);
1344 print_tx("AutoCycTiming: ErrStatus ", pDCTstat->ErrStatus);
1345 print_tx("AutoCycTiming: ErrCode ", pDCTstat->ErrCode);
1346 print_t("AutoCycTiming: Done\n");
1348 mctHookAfterAutoCycTmg();
1350 return pDCTstat->ErrCode;
/* Determine the effective maximum memory clock for this node.
 * Takes the least of: the Si-revision limit (533 here), the user
 * "limit"-mode override, and the platform NVRAM limit, then lowers
 * pDCTstat->PresetmaxFreq to that value (it is never raised). */
1354 static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
1355 struct DCTStatStruc *pDCTstat)
1357 /* Get max frequency from OEM platform definition, from any user
1358 * override (limiting) of max frequency, and from any Si Revision
1359 * Specific information. Return the least of these three in
1360 * DCTStatStruc.PresetmaxFreq.
1366 /* Get CPU Si Revision defined limit (NPT) */
1367 proposedFreq = 533; /* Rev F0 programmable max memclock is */
1369 /*Get User defined limit if "limit" mode */
1370 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
1371 word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1); /* NV index -> MHz via freq table */
1372 if (word < proposedFreq)
1373 proposedFreq = word;
1375 /* Get Platform defined limit */
1376 word = mctGet_NVbits(NV_MAX_MEMCLK);
1377 if (word < proposedFreq)
1378 proposedFreq = word;
1380 word = pDCTstat->PresetmaxFreq;
1381 if (word > proposedFreq) /* only clamp downward, never raise the preset */
1382 word = proposedFreq;
1384 pDCTstat->PresetmaxFreq = word;
/* Select the optimal common (cycle time T, CAS latency CL) pair
 * supported by every populated DIMM on this DCT.  Iterates k (speed,
 * fastest first) and j (CAS latency, lowest first); the first (j,k)
 * the system and all DIMMs can do wins.  On failure, programs
 * failsafe minimum-mode values and records a variance error. */
1390 static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
1391 struct DCTStatStruc *pDCTstat, u8 dct)
1393 /* Find the best T and CL primary timing parameter pair, per Mfg.,
1394 * for the given set of DIMMs, and store into DCTStatStruc
1395 * (.DIMMAutoSpeed and .DIMMCASL). See "Global relationship between
1396 * index values and item values" for definition of CAS latency
1397 * index (j) and Frequency index (k).
1402 /* i={0..7} (std. physical DIMM number)
1403 * j is an integer which enumerates increasing CAS latency.
1404 * k is an integer which enumerates decreasing cycle time.
1405 * CL no. {0,1,2} corresponds to CL X, CL X-.5, or CL X-1 (per individual DIMM)
1406 * Max timing values are per parameter, of all DIMMs, spec'd in ns like the SPD.
1411 for (k=K_MAX; k >= K_MIN; k--) {
1412 for (j = J_MIN; j <= J_MAX; j++) {
1413 if (Sys_Capability_D(pMCTstat, pDCTstat, j, k) ) {
1414 /* 1. check to see if DIMMi is populated.
1415 2. check if DIMMi supports CLj and Tjk */
1416 for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
1417 if (pDCTstat->DIMMValid & (1 << i)) {
1418 if (Dimm_Supports_D(pDCTstat, i, j, k))
/* NOTE(review): loop body partly not shown here; i == MAX_DIMMS_SUPPORTED
 * below implies no populated DIMM rejected this (j,k) pair — confirm. */
1422 if (i == MAX_DIMMS_SUPPORTED) {
/* T1min/CL1min are presumably captured when the first workable pair is
 * found (assignment not visible here) — TODO confirm against full source. */
1432 if (T1min != 0xFF) {
1433 pDCTstat->DIMMCASL = CL1min; /*mfg. optimized */
1434 pDCTstat->DIMMAutoSpeed = T1min;
1435 print_tx("SPDGetTCL_D: DIMMCASL ", pDCTstat->DIMMCASL);
1436 print_tx("SPDGetTCL_D: DIMMAutoSpeed ", pDCTstat->DIMMAutoSpeed);
1439 pDCTstat->DIMMCASL = CL_DEF; /* failsafe values (running in min. mode) */
1440 pDCTstat->DIMMAutoSpeed = T_DEF;
1441 pDCTstat->ErrStatus |= 1 << SB_DimmMismatchT;
1442 pDCTstat->ErrStatus |= 1 << SB_MinimumMode;
1443 pDCTstat->ErrCode = SC_VarianceErr;
1445 print_tx("SPDGetTCL_D: Status ", pDCTstat->Status);
1446 print_tx("SPDGetTCL_D: ErrStatus ", pDCTstat->ErrStatus);
1447 print_tx("SPDGetTCL_D: ErrCode ", pDCTstat->ErrCode);
1448 print_t("SPDGetTCL_D: Done\n");
/* Apply platform-specific configuration for one DCT: fetch the PS
 * config (both DCTs when ganged), enable 2T command mode in Dram
 * Config Hi (F2x[1,0]94 bit 20) when required, then run the
 * platform-spec and PHY-compensation hooks.
 * Returns pDCTstat->ErrCode. */
1452 static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
1453 struct DCTStatStruc *pDCTstat, u8 dct)
1459 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, dct);
1461 if (pDCTstat->GangedMode) {
1462 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1); /* ganged: DCT1 mirrors DCT0 settings */
1465 if ( pDCTstat->_2Tmode == 2) {
1466 dev = pDCTstat->dev_dct;
1467 reg = 0x94 + 0x100 * dct; /* Dram Configuration Hi */
1468 val = Get_NB32(dev, reg);
1469 val |= 1 << 20; /* 2T CMD mode */
1470 Set_NB32(dev, reg, val);
1473 mct_PlatformSpec(pMCTstat, pDCTstat, dct);
1474 InitPhyCompensation(pMCTstat, pDCTstat, dct);
1475 mctHookAfterPSCfg();
1476 return pDCTstat->ErrCode;
/* Automatically configure one DRAM controller (DCT): program bank
 * addressing and CS masks, stitch chip-selects into the address map,
 * then build and write the Dram Control (F2x78), Dram Timing Lo
 * (F2x88), Dram Config Misc/Misc2 (F2xA0/A8) and Dram Config Lo/Hi
 * (F2x90/94) register images from SPD-derived state and NVRAM options.
 * Returns pDCTstat->ErrCode; bails out early on SC_StopError. */
1480 static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
1481 struct DCTStatStruc *pDCTstat, u8 dct)
1483 u32 DramControl, DramTimingLo, Status;
1484 u32 DramConfigLo, DramConfigHi, DramConfigMisc, DramConfigMisc2;
1492 print_tx("AutoConfig_D: DCT: ", dct);
1497 DramConfigMisc2 = 0;
1499 /* set bank addessing and Masks, plus CS pops */
1500 SPDSetBanks_D(pMCTstat, pDCTstat, dct);
1501 if (pDCTstat->ErrCode == SC_StopError)
1502 goto AutoConfig_exit;
1504 /* map chip-selects into local address space */
1505 StitchMemory_D(pMCTstat, pDCTstat, dct);
1506 InterleaveBanks_D(pMCTstat, pDCTstat, dct);
1508 /* temp image of status (for convenience). RO usage! */
1509 Status = pDCTstat->Status;
1511 dev = pDCTstat->dev_dct;
1512 reg_off = 0x100 * dct;
1515 /* Build Dram Control Register Value */
1516 DramConfigMisc2 = Get_NB32 (dev, 0xA8 + reg_off); /* Dram Control*/
1517 DramControl = Get_NB32 (dev, 0x78 + reg_off); /* Dram Control*/
1519 if (mctGet_NVbits(NV_CLKHZAltVidC3))
1520 DramControl |= 1<<16;
1522 // FIXME: Add support(skip) for Ax and Cx versions
1523 DramControl |= 5; /* RdPtrInit */
1526 /* Build Dram Config Lo Register Value */
1527 DramConfigLo |= 1 << 4; /* 75 Ohms ODT */
/* ODT strength selection: drop to 50 Ohms on heavily loaded channels
 * (more address-bus DIMM loads at higher speeds). */
1528 if (mctGet_NVbits(NV_MAX_DIMMS) == 8) {
1529 if (pDCTstat->Speed == 3) {
1530 if ((pDCTstat->MAdimms[dct] == 4))
1531 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1532 } else if (pDCTstat->Speed == 4){
1533 if ((pDCTstat->MAdimms[dct] != 1))
1534 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1537 // FIXME: Skip for Ax versions
/* NOTE(review): the next two branches test the same condition
 * (MAdimms[dct] == 4); elided lines may distinguish them — confirm
 * against the full source before touching this chain. */
1538 if ((pDCTstat->MAdimms[dct] == 4)) {
1539 if ( pDCTstat->DimmQRPresent != 0) {
1540 if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 4)) {
1541 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1543 } else if ((pDCTstat->MAdimms[dct] == 4)) {
1544 if (pDCTstat->Speed == 4) {
1545 if ( pDCTstat->DimmQRPresent != 0) {
1546 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1550 } else if ((pDCTstat->MAdimms[dct] == 2)) {
1551 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1556 // FIXME: Skip for Ax versions
1557 /* callback not required - if (!mctParityControl_D()) */
1558 if (Status & (1 << SB_PARDIMMs)) {
1559 DramConfigLo |= 1 << ParEn;
1560 DramConfigMisc2 |= 1 << ActiveCmdAtRst;
1562 DramConfigLo &= ~(1 << ParEn);
1563 DramConfigMisc2 &= ~(1 << ActiveCmdAtRst);
1566 if (mctGet_NVbits(NV_BurstLen32)) {
1567 if (!pDCTstat->GangedMode) /* 32-byte bursts only valid unganged (64-bit) */
1568 DramConfigLo |= 1 << BurstLength32;
1571 if (Status & (1 << SB_128bitmode))
1572 DramConfigLo |= 1 << Width128; /* 128-bit mode (normal) */
1577 if (pDCTstat->Dimmx4Present & (1 << word))
1578 DramConfigLo |= 1 << dword; /* X4Dimm[3:0] */
1584 if (!(Status & (1 << SB_Registered)))
1585 DramConfigLo |= 1 << UnBuffDimm; /* Unbufferd DIMMs */
1587 if (mctGet_NVbits(NV_ECC_CAP))
1588 if (Status & (1 << SB_ECCDIMMs))
1589 if ( mctGet_NVbits(NV_ECC))
1590 DramConfigLo |= 1 << DimmEcEn;
1594 /* Build Dram Config Hi Register Value */
1595 dword = pDCTstat->Speed;
1596 DramConfigHi |= dword - 1; /* get MemClk encoding */
1597 DramConfigHi |= 1 << MemClkFreqVal;
1599 if (Status & (1 << SB_Registered))
1600 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0))
1601 /* set only if x8 Registered DIMMs in System*/
1602 DramConfigHi |= 1 << RDqsEn;
1604 if (mctGet_NVbits(NV_CKE_PDEN)) {
1605 DramConfigHi |= 1 << 15; /* PowerDownEn */
1606 if (mctGet_NVbits(NV_CKE_CTL))
1607 /*Chip Select control of CKE*/
1608 DramConfigHi |= 1 << 16;
1611 /* Control Bank Swizzle */
1612 if (0) /* call back not needed mctBankSwizzleControl_D()) */
1613 DramConfigHi &= ~(1 << BankSwizzleMode);
1615 DramConfigHi |= 1 << BankSwizzleMode; /* recommended setting (default) */
1617 /* Check for Quadrank DIMM presence */
1618 if ( pDCTstat->DimmQRPresent != 0) {
1619 byte = mctGet_NVbits(NV_4RANKType);
1621 DramConfigHi |= 1 << 17; /* S4 (4-Rank SO-DIMMs) */
1623 DramConfigHi |= 1 << 18; /* R4 (4-Rank Registered DIMMs) */
1626 if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
1627 val = mctGet_NVbits(NV_BYPMAX);
1629 val = 0x0f; // recommended setting (default)
1630 DramConfigHi |= val << 24; /* DcqBypassMax field */
1632 val = pDCTstat->DIMM2Kpage;
1633 if (pDCTstat->GangedMode != 0) {
1641 val = Tab_2KTfawT_k[pDCTstat->Speed];
1643 val = Tab_1KTfawT_k[pDCTstat->Speed];
1645 if (pDCTstat->Speed == 5)
1650 DramConfigHi |= val; /* Tfaw for 1K or 2K paged drams */
1652 // FIXME: Skip for Ax versions
1653 DramConfigHi |= 1 << DcqArbBypassEn;
1656 /* Build MemClkDis Value from Dram Timing Lo and
1657 Dram Config Misc Registers
1658 1. We will assume that MemClkDis field has been preset prior to this
1660 2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
1661 NV_AllMemClks <>0 AND SB_DiagClks ==0 */
1664 /* Dram Timing Low (owns Clock Enable bits) */
1665 DramTimingLo = Get_NB32(dev, 0x88 + reg_off);
1666 if (mctGet_NVbits(NV_AllMemClks) == 0) {
1667 /* Special Jedec SPD diagnostic bit - "enable all clocks" */
1668 if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
1670 byte = mctGet_NVbits(NV_PACK_TYPE); /* package type picks the clk-pin map */
1673 else if (byte == PT_M2)
1679 while(dword < MAX_DIMMS_SUPPORTED) {
1681 print_tx("DramTimingLo: val=", val);
1682 if (!(pDCTstat->DIMMValid & (1<<val)))
1684 DramTimingLo |= 1<<(dword+24); /* MemClkDis: gate clock to empty slot */
1690 print_tx("AutoConfig_D: DramControl: ", DramControl);
1691 print_tx("AutoConfig_D: DramTimingLo: ", DramTimingLo);
1692 print_tx("AutoConfig_D: DramConfigMisc: ", DramConfigMisc);
1693 print_tx("AutoConfig_D: DramConfigMisc2: ", DramConfigMisc2);
1694 print_tx("AutoConfig_D: DramConfigLo: ", DramConfigLo);
1695 print_tx("AutoConfig_D: DramConfigHi: ", DramConfigHi);
1697 /* Write Values to the registers */
1698 Set_NB32(dev, 0x78 + reg_off, DramControl);
1699 Set_NB32(dev, 0x88 + reg_off, DramTimingLo);
1700 Set_NB32(dev, 0xA0 + reg_off, DramConfigMisc);
1701 Set_NB32(dev, 0xA8 + reg_off, DramConfigMisc2);
1702 Set_NB32(dev, 0x90 + reg_off, DramConfigLo);
1703 mct_SetDramConfigHi_D(pDCTstat, dct, DramConfigHi); /* Hi needs special write sequencing */
1704 mct_ForceAutoPrecharge_D(pDCTstat, dct);
1705 mct_EarlyArbEn_D(pMCTstat, pDCTstat);
1706 mctHookAfterAutoCfg();
1708 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1710 print_tx("AutoConfig: Status ", pDCTstat->Status);
1711 print_tx("AutoConfig: ErrStatus ", pDCTstat->ErrStatus);
1712 print_tx("AutoConfig: ErrCode ", pDCTstat->ErrCode);
1713 print_t("AutoConfig: Done\n");
1715 return pDCTstat->ErrCode;
/* Program DRAM bank addressing (F2x[1,0]80) and CS mask registers
 * (F2x[1,0]60/64/68/6C) from each DIMM's SPD geometry, and build the
 * chip-select population/fail maps in pDCTstat.  Flags SC_StopError
 * when no usable chip-select remains. */
1719 static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
1720 struct DCTStatStruc *pDCTstat, u8 dct)
1722 /* Set bank addressing, program Mask values and build a chip-select
1723 * population map. This routine programs PCI 0:24N:2x80 config register
1724 * and PCI 0:24N:2x60,64,68,6C config registers (CS Mask 0-3).
1727 u8 ChipSel, Rows, Cols, Ranks ,Banks, DevWidth;
1728 u32 BankAddrReg, csMask;
1739 dev = pDCTstat->dev_dct;
1740 reg_off = 0x100 * dct;
/* One DIMM per pair of chip-selects (even CS = rank 0, odd CS = rank 1). */
1743 for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel+=2) {
1745 if ((pDCTstat->Status & (1 << SB_64MuxedMode)) && ChipSel >=4)
1748 if (pDCTstat->DIMMValid & (1<<byte)) {
1749 smbaddr = Get_DIMMAddress_D(pDCTstat, (ChipSel + dct));
1751 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1754 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
1757 Banks = mctRead_SPD(smbaddr, SPD_LBANKS);
1759 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH);
1760 DevWidth = byte & 0x7f; /* bits 0-6 = bank 0 width */
1762 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
1763 Ranks = (byte & 7) + 1;
1765 /* Configure Bank encoding
1766 * Use a 6-bit key into a lookup table.
1767 * Key (index) = CCCBRR, where CCC is the number of
1768 * Columns minus 9,RR is the number of Rows minus 13,
1769 * and B is the number of banks minus 2.
1770 * See "6-bit Bank Addressing Table" at the end of
1772 byte = Cols - 9; /* 9 Cols is smallest dev size */
1773 byte <<= 3; /* make room for row and bank bits*/
1777 /* 13 Rows is smallest dev size */
1778 byte |= Rows - 13; /* CCCBRR internal encode */
1780 for (dword=0; dword < 12; dword++) {
1781 if (byte == Tab_BankAddr[dword])
1787 /* bit no. of CS field in address mapping reg.*/
1788 dword <<= (ChipSel<<1);
1789 BankAddrReg |= dword;
1791 /* Mask value=(2pow(rows+cols+banks+3)-1)>>8,
1792 or 2pow(rows+cols+banks-5)-1*/
1795 byte = Rows + Cols; /* cl=rows+cols*/
1797 byte -= 2; /* 3 banks - 5 */
1799 byte -= 3; /* 2 banks - 5 */
1800 /* mask size (64-bit rank only) */
1802 if (pDCTstat->Status & (1 << SB_128bitmode))
1803 byte++; /* double mask size if in 128-bit mode*/
1805 csMask |= 1 << byte;
1808 /*set ChipSelect population indicator even bits*/
1809 pDCTstat->CSPresent |= (1<<ChipSel);
1811 /*set ChipSelect population indicator odd bits*/
1812 pDCTstat->CSPresent |= 1 << (ChipSel + 1);
1814 reg = 0x60+(ChipSel<<1) + reg_off; /*Dram CS Mask Register */
1816 val &= 0x1FF83FE0; /* Mask out reserved bits.*/
1817 Set_NB32(dev, reg, val);
/* SPD checksum error on this DIMM -> mark its CS as test-failed. */
1820 if (pDCTstat->DIMMSPDCSE & (1<<ChipSel))
1821 pDCTstat->CSTestFail |= (1<<ChipSel);
1823 } /* while ChipSel*/
1825 SetCSTriState(pMCTstat, pDCTstat, dct);
1826 /* SetCKETriState */
1827 SetODTTriState(pMCTstat, pDCTstat, dct);
1829 if (pDCTstat->Status & (1 << SB_128bitmode)) {
1830 SetCSTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
1831 SetODTTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
/* XOR before/after the exclude-map hook to find CSs it removed. */
1833 word = pDCTstat->CSPresent;
1834 mctGetCS_ExcludeMap(); /* mask out specified chip-selects */
1835 word ^= pDCTstat->CSPresent;
1836 pDCTstat->CSTestFail |= word; /* enable ODT to disabled DIMMs */
1837 if (!pDCTstat->CSPresent)
1838 pDCTstat->ErrCode = SC_StopError;
1840 reg = 0x80 + reg_off; /* Bank Addressing Register */
1841 Set_NB32(dev, reg, BankAddrReg);
1843 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1845 print_tx("SPDSetBanks: Status ", pDCTstat->Status);
1846 print_tx("SPDSetBanks: ErrStatus ", pDCTstat->ErrStatus);
1847 print_tx("SPDSetBanks: ErrCode ", pDCTstat->ErrCode);
1848 print_t("SPDSetBanks: Done\n");
/* Verify channel A/B DIMM pair symmetry for 128-bit (ganged) mode.
 * For each even/odd DIMM pair, compares rows, columns, banks, device
 * width and rank count from SPD; any mismatch sets SB_DimmMismatchO
 * (organization mismatch) in ErrStatus. */
1852 static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
1853 struct DCTStatStruc *pDCTstat)
1855 /* Per SPDs, check the symmetry of DIMM pairs (DIMM on Channel A
1856 * matching with DIMM on Channel B), the overall DIMM population,
1857 * and determine the width mode: 64-bit, 64-bit muxed, 128-bit.
1861 u8 smbaddr, smbaddr1;
1864 /* Check Symmetry of Channel A and Channel B DIMMs
1865 (must be matched for 128-bit mode).*/
1866 for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
1867 if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
1868 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
1869 smbaddr1 = Get_DIMMAddress_D(pDCTstat, i+1);
/* Compare each geometry field of the pair; masks strip SPD flag bits. */
1871 byte = mctRead_SPD(smbaddr, SPD_ROWSZ) & 0x1f;
1872 byte1 = mctRead_SPD(smbaddr1, SPD_ROWSZ) & 0x1f;
1873 if (byte != byte1) {
1874 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1878 byte = mctRead_SPD(smbaddr, SPD_COLSZ) & 0x1f;
1879 byte1 = mctRead_SPD(smbaddr1, SPD_COLSZ) & 0x1f;
1880 if (byte != byte1) {
1881 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1885 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1886 byte1 = mctRead_SPD(smbaddr1, SPD_BANKSZ);
1887 if (byte != byte1) {
1888 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1892 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0x7f;
1893 byte1 = mctRead_SPD(smbaddr1, SPD_DEVWIDTH) & 0x7f;
1894 if (byte != byte1) {
1895 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1899 byte = mctRead_SPD(smbaddr, SPD_DMBANKS) & 7; /* #ranks-1 */
1900 byte1 = mctRead_SPD(smbaddr1, SPD_DMBANKS) & 7; /* #ranks-1 */
1901 if (byte != byte1) {
1902 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* Map enabled chip-selects into a contiguous local address space:
 * repeatedly picks the biggest remaining bank (per its CS mask),
 * programs its DRAM CS Base register (F2x[1,0]40+4*q), and advances
 * nxtcsBase.  Also handles CS sparing and DQS-training CS reservation,
 * and marks failed-but-present CSs with the TestFail bit. */
1912 static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
1913 struct DCTStatStruc *pDCTstat, u8 dct)
1915 /* Requires that Mask values for each bank be programmed first and that
1916 * the chip-select population indicator is correctly set.
1920 u32 nxtcsBase, curcsBase;
1922 u32 Sizeq, BiggestBank;
1932 dev = pDCTstat->dev_dct;
1933 reg_off = 0x100 * dct;
1937 /* CS Sparing 1=enabled, 0=disabled */
1938 if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
1939 if (MCT_DIMM_SPARE_NO_WARM) {
1940 /* Do no warm-reset DIMM spare */
1941 if (pMCTstat->GStatus & 1 << GSB_EnDIMMSpareNW) {
1942 word = pDCTstat->CSPresent;
1946 /* Make sure at least two chip-selects are available */
1949 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1952 if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
1953 word = pDCTstat->CSPresent;
1955 word &= ~(1 << val); /* reserve one CS as the spare */
1957 /* Make sure at least two chip-selects are available */
1960 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1965 nxtcsBase = 0; /* Next available cs base ADDR[39:8] */
1966 for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
/* Inner pass: find the largest not-yet-enabled bank among all CSs. */
1968 for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
1969 if (pDCTstat->CSPresent & (1 << q)) { /* bank present? */
1970 reg = 0x40 + (q << 2) + reg_off; /* Base[q] reg.*/
1971 val = Get_NB32(dev, reg);
1972 if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
1973 reg = 0x60 + ((q << 1) & 0xc) + reg_off; /*Mask[q] reg.*/
1974 val = Get_NB32(dev, reg);
1978 Sizeq = val; //never used
1979 if (val > BiggestBank) {
1980 /*Bingo! possibly Map this chip-select next! */
1985 } /*if bank present */
1987 if (BiggestBank !=0) {
1988 curcsBase = nxtcsBase; /* curcsBase=nxtcsBase*/
1989 /* DRAM CS Base b Address Register offset */
1990 reg = 0x40 + (b << 2) + reg_off;
1993 val = 1 << Spare; /* Spare Enable*/
1996 val |= 1 << CSEnable; /* Bank Enable */
1998 Set_NB32(dev, reg, val);
2002 /* let nxtcsBase+=Size[b] */
2003 nxtcsBase += BiggestBank;
2006 /* bank present but disabled?*/
2007 if ( pDCTstat->CSTestFail & (1 << p)) {
2008 /* DRAM CS Base b Address Register offset */
2009 reg = (p << 2) + 0x40 + reg_off;
2010 val = 1 << TestFail;
2011 Set_NB32(dev, reg, val);
2016 pDCTstat->DCTSysLimit = nxtcsBase - 1; /* top of this DCT's mapped space */
2017 mct_AfterStitchMemory(pMCTstat, pDCTstat, dct);
2020 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
2022 print_tx("StitchMemory: Status ", pDCTstat->Status);
2023 print_tx("StitchMemory: ErrStatus ", pDCTstat->ErrStatus);
2024 print_tx("StitchMemory: ErrCode ", pDCTstat->ErrCode);
2025 print_t("StitchMemory: Done\n");
/* Look up the cycle time for frequency index k. */
2029 static u8 Get_Tk_D(u8 k)
2031 return Table_T_k[k];
/* Look up the CAS-latency encoding for CL index j. */
2035 static u8 Get_CLj_D(u8 j)
2037 return Table_CL2_j[j];
/* Default Trc value for frequency index k (used when the SPD Trc
 * byte is missing or invalid). */
2040 static u8 Get_DefTrc_k_D(u8 k)
2042 return Tab_defTrc_k[k];
/* Cycle time for index k scaled by 40 (0.25ns SPD units times the
 * internal x10 scaling), used for round-up bus-clock division. */
2046 static u16 Get_40Tk_D(u8 k)
2048 return Tab_40T_k[k]; /* FIXME: k or k<<1 ?*/
/* Look up the memory-clock frequency for index k. */
2052 static u16 Get_Fk_D(u8 k)
2054 return Table_F_k[k]; /* FIXME: k or k<<1 ? */
/* Check whether DIMM i supports CAS-latency index j at cycle-time
 * index k, using its SPD CAS-latency byte and the corresponding
 * cycle-time SPD byte (9, 23 or 25 for CL X / X-1 / X-2).
 * Returns 0 when the DIMM is capable (nonzero otherwise —
 * return-path code not fully visible here). */
2058 static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat,
2068 DIMMi = Get_DIMMAddress_D(pDCTstat, i);
2072 /* check if DIMMi supports CLj */
2073 CL_i = mctRead_SPD(DIMMi, SPD_CASLAT);
2076 /*find out if its CL X, CLX-1, or CLX-2 */
2077 word = bsr(byte); /* bit position of CLj */
2078 wordx = bsr(CL_i); /* bit position of CLX of CLi */
2079 wordx -= word; /* CL number (CL no. = 0,1, 2, or 3) */
2080 wordx <<= 3; /* 8 bits per SPD byte index */
2081 /*get T from SPD byte 9, 23, 25*/
2082 word = (EncodedTSPD >> wordx) & 0xFF; /* selects which SPD byte holds T for this CL */
2084 byte = mctRead_SPD(DIMMi, word); /* DIMMi speed */
2087 } else if (byte == 0){
2088 pDCTstat->ErrStatus |= 1<<SB_NoCycTime; /* SPD reports no cycle time at this CL */
2091 ret = 0; /* DIMM is capable! */
/* Probe all DIMM slots over SMBus SPD: verify each SPD checksum,
 * confirm DDR2 type, and build the per-DIMM presence bitmaps
 * (valid/registered/ECC/parity/x4/x8/x16/2K-page/dual-rank/quad-rank)
 * plus the per-channel bus-loading counters (DATAload/MAload/MAdimms).
 * Cross-checks the maps afterwards to set SB_Registered, SB_ECCDIMMs,
 * SB_PARDIMMs or the mismatch/no-DIMM error bits.
 * Returns pDCTstat->ErrCode. */
2100 static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
2101 struct DCTStatStruc *pDCTstat)
2103 /* Check DIMMs present, verify checksum, flag SDRAM type,
2104 * build population indicator bitmaps, and preload bus loading
2105 * of DIMMs into DCTStatStruc.
2106 * MAAload=number of devices on the "A" bus.
2107 * MABload=number of devices on the "B" bus.
2108 * MAAdimms=number of DIMMs on the "A" bus slots.
2109 * MABdimms=number of DIMMs on the "B" bus slots.
2110 * DATAAload=number of ranks on the "A" bus slots.
2111 * DATABload=number of ranks on the "B" bus slots.
2118 u16 RegDIMMPresent, MaxDimms;
2124 /* preload data structure with addrs */
2125 mctGet_DIMMAddr(pDCTstat, pDCTstat->Node_ID);
2127 DimmSlots = MaxDimms = mctGet_NVbits(NV_MAX_DIMMS);
2129 SPDCtrl = mctGet_NVbits(NV_SPDCHK_RESTRT);
2132 pDCTstat->DimmQRPresent = 0;
2134 for (i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
/* i >= DimmSlots iterations exist only to count the upper two ranks
 * of quad-rank DIMMs flagged on an earlier pass. */
2138 if ((pDCTstat->DimmQRPresent & (1 << i)) || (i < DimmSlots)) {
2139 print_tx("\t DIMMPresence: i=", i);
2140 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
2141 print_tx("\t DIMMPresence: smbaddr=", smbaddr);
/* Sum SPD bytes 0-62 for the checksum stored in byte 63. */
2144 for (Index=0; Index < 64; Index++){
2146 status = mctRead_SPD(smbaddr, Index);
2149 byte = status & 0xFF;
2155 pDCTstat->DIMMPresent |= 1 << i;
2156 if ((Checksum & 0xFF) == byte) {
2157 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2158 if (byte == JED_DDR2SDRAM) {
2159 /*Dimm is 'Present'*/
2160 pDCTstat->DIMMValid |= 1 << i;
2163 pDCTstat->DIMMSPDCSE = 1 << i;
2165 pDCTstat->ErrStatus |= 1 << SB_DIMMChkSum;
2166 pDCTstat->ErrCode = SC_StopError;
2168 /*if NV_SPDCHK_RESTRT is set to 1, ignore faulty SPD checksum*/
2169 pDCTstat->ErrStatus |= 1<<SB_DIMMChkSum;
2170 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2171 if (byte == JED_DDR2SDRAM)
2172 pDCTstat->DIMMValid |= 1 << i;
2175 /* Check module type */
2176 byte = mctRead_SPD(smbaddr, SPD_DIMMTYPE);
2177 if (byte & JED_REGADCMSK)
2178 RegDIMMPresent |= 1 << i;
2179 /* Check ECC capable */
2180 byte = mctRead_SPD(smbaddr, SPD_EDCTYPE);
2181 if (byte & JED_ECC) {
2182 /* DIMM is ECC capable */
2183 pDCTstat->DimmECCPresent |= 1 << i;
2185 if (byte & JED_ADRCPAR) {
2186 /* DIMM is ECC capable */
2187 pDCTstat->DimmPARPresent |= 1 << i;
2189 /* Check if x4 device */
2190 devwidth = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;
2191 if (devwidth == 4) {
2192 /* DIMM is made with x4 or x16 drams */
2193 pDCTstat->Dimmx4Present |= 1 << i;
2194 } else if (devwidth == 8) {
2195 pDCTstat->Dimmx8Present |= 1 << i;
2196 } else if (devwidth == 16) {
2197 pDCTstat->Dimmx16Present |= 1 << i;
2199 /* check page size */
2200 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
2204 word *= devwidth; /* (((2^COLBITS) / 8) * ORG) / 2048 */
2207 pDCTstat->DIMM2Kpage |= 1 << i;
2209 /*Check if SPD diag bit 'analysis probe installed' is set */
2210 byte = mctRead_SPD(smbaddr, SPD_ATTRIB);
2211 if ( byte & JED_PROBEMSK )
2212 pDCTstat->Status |= 1<<SB_DiagClks;
2214 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
2215 if (!(byte & (1<< SPDPLBit)))
2216 pDCTstat->DimmPlPresent |= 1 << i;
2220 /* if any DIMMs are QR, we have to make two passes through DIMMs*/
2221 if ( pDCTstat->DimmQRPresent == 0) {
2224 if (i < DimmSlots) {
2225 pDCTstat->DimmQRPresent |= (1 << i) | (1 << (i+4));
2227 byte = 2; /* upper two ranks of QR DIMM will be counted on another DIMM number iteration*/
2228 } else if (byte == 2) {
2229 pDCTstat->DimmDRPresent |= 1 << i;
2234 else if (devwidth == 4)
2238 bytex <<= 1; /*double Addr bus load value for dual rank DIMMs*/
/* j presumably selects channel A/B from the slot index — the
 * assignment is not visible here; confirm against full source. */
2241 pDCTstat->DATAload[j] += byte; /*number of ranks on DATA bus*/
2242 pDCTstat->MAload[j] += bytex; /*number of devices on CMD/ADDR bus*/
2243 pDCTstat->MAdimms[j]++; /*number of DIMMs on A bus */
2244 /*check for DRAM package Year <= 06*/
2245 byte = mctRead_SPD(smbaddr, SPD_MANDATEYR);
2246 if (byte < MYEAR06) {
2247 /*Year < 06 and hence Week < 24 of 06 */
2248 pDCTstat->DimmYr06 |= 1 << i;
2249 pDCTstat->DimmWk2406 |= 1 << i;
2250 } else if (byte == MYEAR06) {
2251 /*Year = 06, check if Week <= 24 */
2252 pDCTstat->DimmYr06 |= 1 << i;
2253 byte = mctRead_SPD(smbaddr, SPD_MANDATEWK);
2254 if (byte <= MWEEK24)
2255 pDCTstat->DimmWk2406 |= 1 << i;
2261 print_tx("\t DIMMPresence: DIMMValid=", pDCTstat->DIMMValid);
2262 print_tx("\t DIMMPresence: DIMMPresent=", pDCTstat->DIMMPresent);
2263 print_tx("\t DIMMPresence: RegDIMMPresent=", RegDIMMPresent);
2264 print_tx("\t DIMMPresence: DimmECCPresent=", pDCTstat->DimmECCPresent);
2265 print_tx("\t DIMMPresence: DimmPARPresent=", pDCTstat->DimmPARPresent);
2266 print_tx("\t DIMMPresence: Dimmx4Present=", pDCTstat->Dimmx4Present);
2267 print_tx("\t DIMMPresence: Dimmx8Present=", pDCTstat->Dimmx8Present);
2268 print_tx("\t DIMMPresence: Dimmx16Present=", pDCTstat->Dimmx16Present);
2269 print_tx("\t DIMMPresence: DimmPlPresent=", pDCTstat->DimmPlPresent);
2270 print_tx("\t DIMMPresence: DimmDRPresent=", pDCTstat->DimmDRPresent);
2271 print_tx("\t DIMMPresence: DimmQRPresent=", pDCTstat->DimmQRPresent);
2272 print_tx("\t DIMMPresence: DATAload[0]=", pDCTstat->DATAload[0]);
2273 print_tx("\t DIMMPresence: MAload[0]=", pDCTstat->MAload[0]);
2274 print_tx("\t DIMMPresence: MAdimms[0]=", pDCTstat->MAdimms[0]);
2275 print_tx("\t DIMMPresence: DATAload[1]=", pDCTstat->DATAload[1]);
2276 print_tx("\t DIMMPresence: MAload[1]=", pDCTstat->MAload[1]);
2277 print_tx("\t DIMMPresence: MAdimms[1]=", pDCTstat->MAdimms[1]);
2279 if (pDCTstat->DIMMValid != 0) { /* If any DIMMs are present...*/
2280 if (RegDIMMPresent != 0) {
2281 if ((RegDIMMPresent ^ pDCTstat->DIMMValid) !=0) {
2282 /* module type DIMM mismatch (reg'ed, unbuffered) */
2283 pDCTstat->ErrStatus |= 1<<SB_DimmMismatchM;
2284 pDCTstat->ErrCode = SC_StopError;
2286 /* all DIMMs are registered */
2287 pDCTstat->Status |= 1<<SB_Registered;
2290 if (pDCTstat->DimmECCPresent != 0) {
2291 if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
2292 /* all DIMMs are ECC capable */
2293 pDCTstat->Status |= 1<<SB_ECCDIMMs;
2296 if (pDCTstat->DimmPARPresent != 0) {
2297 if ((pDCTstat->DimmPARPresent ^ pDCTstat->DIMMValid) == 0) {
2298 /*all DIMMs are Parity capable */
2299 pDCTstat->Status |= 1<<SB_PARDIMMs;
2303 /* no DIMMs present or no DIMMs that qualified. */
2304 pDCTstat->ErrStatus |= 1<<SB_NoDimms;
2305 pDCTstat->ErrCode = SC_StopError;
2308 print_tx("\t DIMMPresence: Status ", pDCTstat->Status);
2309 print_tx("\t DIMMPresence: ErrStatus ", pDCTstat->ErrStatus);
2310 print_tx("\t DIMMPresence: ErrCode ", pDCTstat->ErrCode);
2311 print_t("\t DIMMPresence: Done\n");
2313 mctHookAfterDIMMpre();
2315 return pDCTstat->ErrCode;
/* Decide whether the system as a whole can run at CAS index j and
 * frequency index k: the candidate frequency must not exceed the
 * preset maximum, and j must lie in the silicon-supported CL range. */
2319 static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
2320 struct DCTStatStruc *pDCTstat, int j, int k)
2322 /* Determine if system is capable of operating at given input
2323 * parameters for CL, and T. There are three components to
2324 * determining "maximum frequency" in AUTO mode: SPD component,
2325 * Bus load component, and "Preset" max frequency component.
2326 * This procedure is used to help find the SPD component and relies
2327 * on pre-determination of the bus load component and the Preset
2328 * components. The generalized algorithm for finding maximum
2329 * frequency is structured this way so as to optimize for CAS
2330 * latency (which might get better as a result of reduced frequency).
2331 * See "Global relationship between index values and item values"
2332 * for definition of CAS latency index (j) and Frequency index (k).
2337 if (Get_Fk_D(k) > pDCTstat->PresetmaxFreq)
2342 /* compare proposed CAS latency with AMD Si capabilities */
2343 if ((j < J_MIN) || (j > J_MAX))
/* Return the SMBus SPD address of DIMM slot i from the node's
 * pre-loaded DIMMAddr table (filled by mctGet_DIMMAddr). */
2355 static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i)
2359 p = pDCTstat->DIMMAddr;
2360 //mct_BeforeGetDIMMAddress();
/* Initialize this node's DRAM controllers: always init DCT0, then,
 * when unganged, init DCT1 if it has valid DIMMs (preserving DCT0's
 * error code if DCT1 simply is not running) or disable the DCT1
 * DRAM interface when it has none. */
2365 static void mct_initDCT(struct MCTStatStruc *pMCTstat,
2366 struct DCTStatStruc *pDCTstat)
2371 /* Config. DCT0 for Ganged or unganged mode */
2372 print_t("\tmct_initDCT: DCTInit_D 0\n");
2373 DCTInit_D(pMCTstat, pDCTstat, 0);
2374 if (pDCTstat->ErrCode == SC_FatalErr) {
2375 // Do nothing goto exitDCTInit; /* any fatal errors? */
2377 /* Configure DCT1 if unganged and enabled*/
2378 if (!pDCTstat->GangedMode) {
2379 if ( pDCTstat->DIMMValidDCT[1] > 0) {
2380 print_t("\tmct_initDCT: DCTInit_D 1\n");
2381 err_code = pDCTstat->ErrCode; /* save DCT0 errors */
2382 pDCTstat->ErrCode = 0;
2383 DCTInit_D(pMCTstat, pDCTstat, 1);
2384 if (pDCTstat->ErrCode == 2) /* DCT1 is not Running */
2385 pDCTstat->ErrCode = err_code; /* Using DCT0 Error code to update pDCTstat.ErrCode */
/* No DIMMs on DCT1: disable its DRAM interface (F2x194). */
2387 val = 1 << DisDramInterface;
2388 Set_NB32(pDCTstat->dev_dct, 0x100 + 0x94, val);
/* Run hardware DRAM initialization for one DCT, wrapping it with the
 * Erratum 278 workaround: on pre-B2 parts in ganged mode, auto
 * refresh is disabled (F2x[1,0]8C DisAutoRefresh) before init and
 * restored after InitDram completes so both DCTs stay in sync. */
2396 static void mct_DramInit(struct MCTStatStruc *pMCTstat,
2397 struct DCTStatStruc *pDCTstat, u8 dct)
2401 mct_BeforeDramInit_Prod_D(pMCTstat, pDCTstat);
2402 // FIXME: for rev A: mct_BeforeDramInit_D(pDCTstat, dct);
2404 /* Disable auto refresh before Dram init when in ganged mode (Erratum 278) */
2405 if (pDCTstat->LogicalCPUID & AMD_DR_LT_B2) {
2406 if (pDCTstat->GangedMode) {
2407 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2408 val |= 1 << DisAutoRefresh;
2409 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2413 mct_DramInit_Hw_D(pMCTstat, pDCTstat, dct);
2415 /* Re-enable auto refresh after Dram init when in ganged mode
2416 * to ensure both DCTs are in sync (Erratum 278)
2419 if (pDCTstat->LogicalCPUID & AMD_DR_LT_B2) {
2420 if (pDCTstat->GangedMode) {
/* Poll until the hardware signals DRAM init complete (F2x[1,0]90 InitDram). */
2422 val = Get_NB32(pDCTstat->dev_dct, 0x90 + (0x100 * dct));
2423 } while (!(val & (1 << InitDram)));
/* NOTE(review): DisAutoRefresh is cleared, set, then cleared again
 * below — presumably a deliberate toggle to resynchronize both DCTs'
 * refresh engines for the erratum; confirm before simplifying. */
2427 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2428 val &= ~(1 << DisAutoRefresh);
2429 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2430 val |= 1 << DisAutoRefresh;
2431 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2432 val &= ~(1 << DisAutoRefresh);
2433 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
/* Decide ganged (128-bit) vs. unganged DCT operation from the per-channel
 * DIMM population. Even bits of DIMMValid = channel A, odd bits = channel B.
 * If the populations match (and NV_Unganged is not forced) the DCTs are
 * ganged by setting DctGangEn. Returns pDCTstat->ErrCode. */
2439 static u8 mct_setMode(struct MCTStatStruc *pMCTstat,
2440 struct DCTStatStruc *pDCTstat)
2447 byte = bytex = pDCTstat->DIMMValid;
2448 bytex &= 0x55; /* CHA DIMM pop */
2449 pDCTstat->DIMMValidDCT[0] = bytex;
2451 byte &= 0xAA; /* CHB DIMM pop */
2453 pDCTstat->DIMMValidDCT[1] = byte;
2455 if (byte != bytex) {
2456 pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
/* Matched population: flag a "mismatch" anyway if the NVRAM option
 * forces unganged operation. */
2458 if ( mctGet_NVbits(NV_Unganged) )
2459 pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
2461 if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
2462 pDCTstat->GangedMode = 1;
2463 /* valid 128-bit mode population. */
2464 pDCTstat->Status |= 1 << SB_128bitmode;
2466 val = Get_NB32(pDCTstat->dev_dct, reg);
2467 val |= 1 << DctGangEn;
2468 Set_NB32(pDCTstat->dev_dct, reg, val);
2469 print_tx("setMode: DRAM Controller Select Low Register = ", val);
2472 return pDCTstat->ErrCode;
/* Read a 32-bit northbridge PCI config register. */
2476 u32 Get_NB32(u32 dev, u32 reg)
2478 return pci_read_config32(dev, reg);
/* Write a 32-bit northbridge PCI config register. */
2482 void Set_NB32(u32 dev, u32 reg, u32 val)
2484 pci_write_config32(dev, reg, val);
/* Indirect (index/data pair) NB register read: write the index to
 * index_reg, then read the data port at index_reg+4.  No completion
 * handshake — use Get_NB32_index_wait for DCT additional-data space. */
2488 u32 Get_NB32_index(u32 dev, u32 index_reg, u32 index)
2492 Set_NB32(dev, index_reg, index);
2493 dword = Get_NB32(dev, index_reg+0x4);
/* Indirect (index/data pair) NB register write — no completion handshake. */
2498 void Set_NB32_index(u32 dev, u32 index_reg, u32 index, u32 data)
2500 Set_NB32(dev, index_reg, index);
2501 Set_NB32(dev, index_reg + 0x4, data);
/* Indirect NB read with handshake: clear DctAccessWrite in the index,
 * poll index_reg until DctAccessDone, then read the data port. */
2505 u32 Get_NB32_index_wait(u32 dev, u32 index_reg, u32 index)
2511 index &= ~(1 << DctAccessWrite);
2512 Set_NB32(dev, index_reg, index);
2514 dword = Get_NB32(dev, index_reg);
2515 } while (!(dword & (1 << DctAccessDone)));
2516 dword = Get_NB32(dev, index_reg + 0x4);
/* Indirect NB write with handshake: stage data first, then issue the
 * index with DctAccessWrite set and poll for DctAccessDone. */
2522 void Set_NB32_index_wait(u32 dev, u32 index_reg, u32 index, u32 data)
2527 Set_NB32(dev, index_reg + 0x4, data);
2528 index |= (1 << DctAccessWrite);
2529 Set_NB32(dev, index_reg, index);
2531 dword = Get_NB32(dev, index_reg);
2532 } while (!(dword & (1 << DctAccessDone)));
/* Program platform-specific drive-strength/timing values (obtained from the
 * interface layer) into the DCT additional-data registers.  In ganged mode
 * both channels are synced first.  Returns pDCTstat->ErrCode. */
2537 static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
2538 struct DCTStatStruc *pDCTstat, u8 dct)
2540 /* Get platform specific config/timing values from the interface layer
2541 * and program them into DCT.
2544 u32 dev = pDCTstat->dev_dct;
2546 u8 i, i_start, i_end;
2548 if (pDCTstat->GangedMode) {
2549 SyncSetting(pDCTstat);
2556 for (i=i_start; i<i_end; i++) {
2557 index_reg = 0x98 + (i * 0x100);
2558 Set_NB32_index_wait(dev, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]); /* Channel A Output Driver Compensation Control */
2559 Set_NB32_index_wait(dev, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]); /* Channel A Address Timing Control (value written is CH_ADDR_TMG; previous comment was a copy-paste of the ODC line) */
2562 return pDCTstat->ErrCode;
/* Block until this node's memory controller reports DramEnabled in F2x110.
 * Only polls when the node is present and has at least one DIMM on a DCT. */
2567 static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat)
2572 if (pDCTstat->NodePresent) {
2573 print_tx("mct_SyncDCTsReady: Node ", pDCTstat->Node_ID);
2574 dev = pDCTstat->dev_dct;
2576 if ((pDCTstat->DIMMValidDCT[0] ) || (pDCTstat->DIMMValidDCT[1])) { /* This Node has dram */
2578 val = Get_NB32(dev, 0x110);
2579 } while (!(val & (1 << DramEnabled)));
2580 print_t("mct_SyncDCTsReady: DramEnabled\n");
2582 } /* Node is present */
/* After CAS-latency/timing discovery: load the per-DCT DIMM population into
 * DIMMValid and stop with SC_StopError if the selected DCT has no DIMMs.
 * NOTE(review): elided lines likely separate dct==0 and dct==1 paths; the
 * second path also resets CSPresent/CSTestFail. */
2586 static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
2587 struct DCTStatStruc *pDCTstat, u8 dct)
2589 if (!pDCTstat->GangedMode) {
2591 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2592 if (pDCTstat->DIMMValidDCT[dct] == 0)
2593 pDCTstat->ErrCode = SC_StopError;
2595 pDCTstat->CSPresent = 0;
2596 pDCTstat->CSTestFail = 0;
2597 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2598 if (pDCTstat->DIMMValidDCT[dct] == 0)
2599 pDCTstat->ErrCode = SC_StopError;
/* Run SPD-based width calculation, then pick ganged/unganged operation via
 * mct_setMode.  Returns the resulting error code. */
2604 static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
2605 struct DCTStatStruc *pDCTstat, u8 dct)
2610 SPDCalcWidth_D(pMCTstat, pDCTstat);
2611 ret = mct_setMode(pMCTstat, pDCTstat);
2613 ret = pDCTstat->ErrCode;
2616 print_tx("SPDCalcWidth: Status ", pDCTstat->Status);
2617 print_tx("SPDCalcWidth: ErrStatus ", pDCTstat->ErrStatus);
2618 print_tx("SPDCalcWidth: ErrCode ", pDCTstat->ErrCode);
2619 print_t("SPDCalcWidth: Done\n");
/* After memory is stitched into the system map: account for the software
 * memory hole (NV_MemHole/NV_BottomIO) and, in unganged mode, program the
 * DCT select registers (F2x110 and the DctSelBaseAddr fields) so accesses
 * are routed to the correct DCT.  Updates NodeSysLimit. */
2625 static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
2626 struct DCTStatStruc *pDCTstat, u8 dct)
2635 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
2636 DramHoleBase = mctGet_NVbits(NV_BottomIO);
2638 /* Increase hole size so;[31:24]to[31:16]
2639 * it has granularity of 128MB shl eax,8
2640 * Set 'effective' bottom IOmov DramHoleBase,eax
2642 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2644 /* In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
2645 if (!pDCTstat->GangedMode) {
2646 dev = pDCTstat->dev_dct;
2647 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2648 /* if DCT0 and DCT1 both exist, set DctSelBaseAddr[47:27] to the top of DCT0 */
2650 if (pDCTstat->DIMMValidDCT[1] > 0) {
2651 dword = pDCTstat->DCTSysLimit + 1;
2652 dword += pDCTstat->NodeSysBase;
2653 dword >>= 8; /* scale [39:8] to [47:27],and to F2x110[31:11] */
2654 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2655 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2656 val = pMCTstat->HoleBase;
2658 val = (((~val) & 0xFF) + 1);
2663 val = Get_NB32(dev, reg);
2666 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2667 Set_NB32(dev, reg, val);
2668 print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select Low Register = ", val);
2669 print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select High Register = ", dword);
2673 Set_NB32(dev, reg, val);
2676 /* Program the DctSelBaseAddr value to 0
2677 if DCT 0 is disabled */
2678 if (pDCTstat->DIMMValidDCT[0] == 0) {
2679 dword = pDCTstat->NodeSysBase;
2681 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2682 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2683 val = pMCTstat->HoleBase;
2686 val |= (((~val) & 0xFFFF) + 1);
2691 Set_NB32(dev, reg, val);
2694 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2695 Set_NB32(dev, reg, val);
2696 print_tx("AfterStitch DCT1 only: DRAM Controller Select Low Register = ", val);
2697 print_tx("AfterStitch DCT1 only: DRAM Controller Select High Register = ", dword);
/* Ganged mode: a single DCT covers the node; just extend the limit. */
2701 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2703 print_tx("AfterStitch pDCTstat->NodeSysBase = ", pDCTstat->NodeSysBase);
2704 print_tx("mct_AfterStitchMemory: pDCTstat->NodeSysLimit ", pDCTstat->NodeSysLimit);
/* Thin wrapper: run DIMM presence detection and return the error code. */
2708 static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
2709 struct DCTStatStruc *pDCTstat, u8 dct)
2714 ret = DIMMPresence_D(pMCTstat, pDCTstat);
2716 ret = pDCTstat->ErrCode;
2722 /* mct_BeforeGetDIMMAddress inline in C */
/* For every present node, program the secondary ("other") bus-turnaround
 * timings on DCT0, and on DCT1 when populated and unganged. */
2725 static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
2726 struct DCTStatStruc *pDCTstatA)
2730 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
2731 struct DCTStatStruc *pDCTstat;
2732 pDCTstat = pDCTstatA + Node;
2733 if (pDCTstat->NodePresent) {
2734 if (pDCTstat->DIMMValidDCT[0]) {
2735 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
2736 Set_OtherTiming(pMCTstat, pDCTstat, 0);
2738 if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
2739 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
2740 Set_OtherTiming(pMCTstat, pDCTstat, 1);
2742 } /* Node is present*/
/* Compute Trdrd/Twrwr/Twrrd/TrwtTO/TrwtWB for one DCT and fold them into
 * the DRAM Timing High register (F2x[1,0]8C). */
2747 static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
2748 struct DCTStatStruc *pDCTstat, u8 dct)
2751 u32 reg_off = 0x100 * dct;
2754 u32 dev = pDCTstat->dev_dct;
2756 Get_Trdrd(pMCTstat, pDCTstat, dct);
2757 Get_Twrwr(pMCTstat, pDCTstat, dct);
2758 Get_Twrrd(pMCTstat, pDCTstat, dct);
2759 Get_TrwtTO(pMCTstat, pDCTstat, dct);
2760 Get_TrwtWB(pMCTstat, pDCTstat);
2762 reg = 0x8C + reg_off; /* Dram Timing Hi */
2763 val = Get_NB32(dev, reg);
2765 dword = pDCTstat->TrwtTO; //0x07
2767 dword = pDCTstat->Twrrd; //0x03
2769 dword = pDCTstat->Twrwr; //0x03
2771 dword = pDCTstat->Trdrd; //0x03
2773 dword = pDCTstat->TrwtWB; //0x07
2775 val = OtherTiming_A_D(pDCTstat, val);
2776 Set_NB32(dev, reg, val);
/* Derive Trdrd (read-to-read turnaround) for one DCT.
 * Mixed x4/x8 DIMMs: use the DqsRcvEnGross delay spread (CGDD).
 * Uniform DIMMs: Trdrd can be 0 only if RdDqsTime matches across DIMMs and
 * all DqsRcvEn deltas are under half a MEMCLK; otherwise 1.
 * Speed > 3 (DDR800-class): DIMM0/DIMM1 use separate register sets, so the
 * per-DIMM RdDqs timing registers must be compared explicitly. */
2781 static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
2782 struct DCTStatStruc *pDCTstat, u8 dct)
2788 u32 index_reg = 0x98 + 0x100 * dct;
2789 u32 dev = pDCTstat->dev_dct;
2791 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0)) {
2792 /* mixed (x4 or x8) DIMM types
2793 the largest DqsRcvEnGrossDelay of any DIMM minus the DqsRcvEnGrossDelay
2794 of any other DIMM is equal to the Critical Gross Delay Difference (CGDD) for Trdrd.*/
2795 byte = Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2803 Trdrd with non-mixed DIMM types
2804 RdDqsTime are the same for all DIMMs and DqsRcvEn difference between
2805 any two DIMMs is less than half of a MEMCLK, BIOS should program Trdrd to 0000b,
2806 else BIOS should program Trdrd to 0001b.
2808 RdDqsTime are the same for all DIMMs
2809 DDR400~DDR667 only use one set register
2810 DDR800 have two set register for DIMM0 and DIMM1 */
2812 if (pDCTstat->Speed > 3) {
2813 /* DIMM0+DIMM1 exist */ //NOTE it should be 5
2814 val = bsf(pDCTstat->DIMMValid);
2815 dword = bsr(pDCTstat->DIMMValid);
2816 if (dword != val && dword != 0) {
2817 /* DCT Read DQS Timing Control - DIMM0 - Low */
2818 dword = Get_NB32_index_wait(dev, index_reg, 0x05);
2819 /* DCT Read DQS Timing Control - DIMM1 - Low */
2820 val = Get_NB32_index_wait(dev, index_reg, 0x105);
2824 /* DCT Read DQS Timing Control - DIMM0 - High */
2825 dword = Get_NB32_index_wait(dev, index_reg, 0x06);
2826 /* DCT Read DQS Timing Control - DIMM1 - High */
2827 val = Get_NB32_index_wait(dev, index_reg, 0x106);
2833 /* DqsRcvEn difference between any two DIMMs is
2834 less than half of a MEMCLK */
2835 /* DqsRcvEn byte 1,0*/
2836 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x10))
2838 /* DqsRcvEn byte 3,2*/
2839 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x11))
2841 /* DqsRcvEn byte 5,4*/
2842 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x20))
2844 /* DqsRcvEn byte 7,6*/
2845 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x21))
/* ECC byte lane (index 0x12) — presumably gated on DimmECCPresent in
 * elided lines, as done in Get_DqsRcvEnGross_Diff. */
2848 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x12))
2854 pDCTstat->Trdrd = Trdrd;
/* Derive Twrwr (write-to-write turnaround) for one DCT from the spread of
 * WrDatGrossDlyByte across DIMMs (CGDD).  DDR800-class speeds keep separate
 * register sets per DIMM, hence the bsf/bsr population check. */
2859 static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
2860 struct DCTStatStruc *pDCTstat, u8 dct)
2863 u32 index_reg = 0x98 + 0x100 * dct;
2864 u32 dev = pDCTstat->dev_dct;
2868 /* WrDatGrossDlyByte only use one set register when DDR400~DDR667
2869 DDR800 have two set register for DIMM0 and DIMM1 */
2870 if (pDCTstat->Speed > 3) {
2871 val = bsf(pDCTstat->DIMMValid);
2872 dword = bsr(pDCTstat->DIMMValid);
2873 if (dword != val && dword != 0) {
2874 /*the largest WrDatGrossDlyByte of any DIMM minus the
2875 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
2876 val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2883 pDCTstat->Twrwr = Twrwr;
/* Derive Twrrd (write-to-read turnaround) for one DCT by combining the
 * WrDatGross maximum and the DqsRcvEnGross minimum delays (CGDD rule). */
2887 static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
2888 struct DCTStatStruc *pDCTstat, u8 dct)
2890 u8 byte, bytex, val;
2891 u32 index_reg = 0x98 + 0x100 * dct;
2892 u32 dev = pDCTstat->dev_dct;
2894 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2895 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2896 equal to the Critical Gross Delay Difference (CGDD) for Twrrd.*/
2898 /* WrDatGrossDlyByte only use one set register when DDR400~DDR667
2899 DDR800 have two set register for DIMM0 and DIMM1 */
2900 if (pDCTstat->Speed > 3) {
2901 val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2903 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 1); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
2904 pDCTstat->WrDatGrossH = (u8) val; /* low byte = max value */
2907 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
/* Side effects of the helpers above: DqsRcvEnGrossL and WrDatGrossH. */
2909 bytex = pDCTstat->DqsRcvEnGrossL;
2910 byte = pDCTstat->WrDatGrossH;
2920 pDCTstat->Twrrd = bytex;
/* Derive TrwtTO (read-to-write turnaround) for one DCT from the
 * DqsRcvEnGross / WrDatGross delay values (CGDD rule); elided lines adjust
 * the value by the small-constant cases tested below. */
2924 static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
2925 struct DCTStatStruc *pDCTstat, u8 dct)
2928 u32 index_reg = 0x98 + 0x100 * dct;
2929 u32 dev = pDCTstat->dev_dct;
2931 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2932 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2933 equal to the Critical Gross Delay Difference (CGDD) for TrwtTO. */
2934 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2935 Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2936 bytex = pDCTstat->DqsRcvEnGrossL;
2937 byte = pDCTstat->WrDatGrossH;
2940 if ((bytex == 1) || (bytex == 2))
2946 if ((byte == 0) || (byte == 1))
2952 pDCTstat->TrwtTO = bytex;
/* TrwtWB (read-to-write data-bus turnaround) is always TrwtTO + 1. */
2956 static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
2957 struct DCTStatStruc *pDCTstat)
2959 /* TrwtWB ensures read-to-write data-bus turnaround.
2960 This value should be one more than the programmed TrwtTO.*/
2961 pDCTstat->TrwtWB = pDCTstat->TrwtTO + 1;
/* Scan the per-DIMM DqsRcvEn values at 'index' (two byte lanes packed in
 * bits [7:0] and [23:16]) and report whether the max-min spread on either
 * lane exceeds 31 (i.e. is not "less than half a MEMCLK"). */
2965 static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat,
2966 u8 dct, u32 dev, u32 index_reg,
2969 u8 Smallest_0, Largest_0, Smallest_1, Largest_1;
/* Even bits of DIMMValid index the DIMMs on this channel. */
2983 for (i=0; i < 8; i+=2) {
2984 if ( pDCTstat->DIMMValid & (1 << i)) {
2985 val = Get_NB32_index_wait(dev, index_reg, index);
2987 if (byte < Smallest_0)
2989 if (byte > Largest_0)
2992 byte = (val >> 16) & 0xFF;
2993 if (byte < Smallest_1)
2995 if (byte > Largest_1)
3002 /* check if total DqsRcvEn delay difference between any
3003 two DIMMs is less than half of a MEMCLK */
3004 if ((Largest_0 - Smallest_0) > 31)
3007 if ((Largest_1 - Smallest_1) > 31)
/* Compute the Critical Gross Delay Difference for DqsRcvEn across all byte
 * lanes (and the ECC lane when present).  Side effect: stores the overall
 * largest value in pDCTstat->DqsRcvEnGrossL.  Returns Largest - Smallest. */
3013 static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
3014 u32 dev, u32 index_reg)
3016 u8 Smallest, Largest;
3020 /* The largest DqsRcvEnGrossDelay of any DIMM minus the
3021 DqsRcvEnGrossDelay of any other DIMM is equal to the Critical
3022 Gross Delay Difference (CGDD) */
3023 /* DqsRcvEn byte 1,0 */
3024 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x10);
3025 Largest = val & 0xFF;
3026 Smallest = (val >> 8) & 0xFF;
3028 /* DqsRcvEn byte 3,2 */
3029 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x11);
3031 bytex = (val >> 8) & 0xFF;
3032 if (bytex < Smallest)
3037 /* DqsRcvEn byte 5,4 */
3038 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x20);
3040 bytex = (val >> 8) & 0xFF;
3041 if (bytex < Smallest)
3046 /* DqsRcvEn byte 7,6 */
3047 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x21);
3049 bytex = (val >> 8) & 0xFF;
3050 if (bytex < Smallest)
/* ECC lane only contributes when ECC DIMMs are present. */
3055 if (pDCTstat->DimmECCPresent> 0) {
3057 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x12);
3059 bytex = (val >> 8) & 0xFF;
3060 if (bytex < Smallest)
3066 pDCTstat->DqsRcvEnGrossL = Largest;
3067 return Largest - Smallest;
/* Compute the CGDD for WrDatGrossDlyByte across DIMM0 and DIMM1 register
 * sets.  Side effect: stores the largest value in pDCTstat->WrDatGrossH.
 * Returns Largest - Smallest. */
3071 static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat,
3072 u8 dct, u32 dev, u32 index_reg)
3074 u8 Smallest, Largest;
3078 /* The largest WrDatGrossDlyByte of any DIMM minus the
3079 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
3080 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x01); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
3081 Largest = val & 0xFF;
3082 Smallest = (val >> 8) & 0xFF;
3083 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x101); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM1 */
3085 bytex = (val >> 8) & 0xFF;
3086 if (bytex < Smallest)
3091 // FIXME: Add Cx support.
3093 pDCTstat->WrDatGrossH = Largest;
3094 return Largest - Smallest;
/* Return max (low byte) and min (high byte) of the DqsRcvEn gross-delay
 * fields (bits starting at 5 and 16+5) at 'index', scanned over all
 * populated DIMMs on the channel. */
3097 static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
3098 u32 dev, u32 index_reg,
3101 u8 Smallest, Largest;
3114 for (i=0; i < 8; i+=2) {
3115 if ( pDCTstat->DIMMValid & (1 << i)) {
3116 val = Get_NB32_index_wait(dev, index_reg, index);
3118 byte = (val >> 5) & 0xFF;
3119 if (byte < Smallest)
3124 byte = (val >> (16 + 5)) & 0xFF;
3125 if (byte < Smallest)
/* Return max (low byte) and min (high byte) of the WrDatGrossDlyByte fields
 * at 'index' across two registers (four lanes each), plus the ECC lane
 * when ECC DIMMs are present. */
3141 static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
3142 u8 dct, u32 dev, u32 index_reg,
3145 u8 Smallest, Largest;
3153 for (i=0; i < 2; i++) {
3154 val = Get_NB32_index_wait(dev, index_reg, index);
3157 for (j=0; j < 4; j++) {
3159 if (byte < Smallest)
3168 if (pDCTstat->DimmECCPresent > 0) {
3170 val = Get_NB32_index_wait(dev, index_reg, index);
3174 if (byte < Smallest)
/* Final MCT cleanup: undo the write-burst workaround set in
 * mct_InitialMCT_D (ClrClToNB_D itself is deferred until out of ROM). */
3189 static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
3190 struct DCTStatStruc *pDCTstat)
3192 print_t("\tmct_FinalMCT_D: Clr Cl, Wb\n");
3195 /* ClrClToNB_D postponed until we're done executing from ROM */
3196 mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
/* Initial MCT setup: apply the cache-line-to-NB and WB-enhancement MSR
 * workarounds needed while training runs from ROM. */
3200 static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat)
3202 print_t("\tmct_InitialMCT_D: Set Cl, Wb\n");
3203 mct_SetClToNB_D(pMCTstat, pDCTstat);
3204 mct_SetWbEnhWsbDis_D(pMCTstat, pDCTstat);
3208 static u32 mct_NodePresent_D(void)
/* Per-node init of the DCT status structure; detects (via MSR bit 46)
 * whether extended PCI configuration access is enabled and records it in
 * Status.  Elided lines presumably enable it when absent — _WRMSR below. */
3216 static void mct_init(struct MCTStatStruc *pMCTstat,
3217 struct DCTStatStruc *pDCTstat)
3222 pDCTstat->GangedMode = 0;
3223 pDCTstat->DRPresent = 1;
3225 /* enable extend PCI configuration access */
3227 _RDMSR(addr, &lo, &hi);
3228 if (hi & (1 << (46-32))) {
3229 pDCTstat->Status |= 1 << SB_ExtConfig;
3232 _WRMSR(addr, lo, hi);
/* Clear the LegacyBiosMode bit in the DCT config register. */
3237 static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
3238 struct DCTStatStruc *pDCTstat)
3242 u32 dev = pDCTstat->dev_dct;
3244 /* Clear Legacy BIOS Mode bit */
3246 val = Get_NB32(dev, reg);
3247 val &= ~(1<<LegacyBiosMode);
3248 Set_NB32(dev, reg, val);
/* Mirror the HT DRAM base/limit map from Node0's F1x40..F1x4C into each
 * present node's extended map registers F1x120/F1x124, applying the
 * software memory-hole hoist (DramMemHoistValid + HoleBase) when active. */
3252 static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
3253 struct DCTStatStruc *pDCTstatA)
3256 u32 Drambase, Dramlimit;
3262 struct DCTStatStruc *pDCTstat;
3264 pDCTstat = pDCTstatA + 0;
3265 dev = pDCTstat->dev_map;
3267 /* Copy dram map from F1x40/44,F1x48/4c,
3268 to F1x120/124(Node0),F1x120/124(Node1),...*/
3269 for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
3270 pDCTstat = pDCTstatA + Node;
3271 devx = pDCTstat->dev_map;
3273 /* get base/limit from Node0 */
3274 reg = 0x40 + (Node << 3); /* Node0/Dram Base 0 */
3275 val = Get_NB32(dev, reg);
3276 Drambase = val >> ( 16 + 3);
3278 reg = 0x44 + (Node << 3); /* Node0/Dram Limit 0 */
3279 val = Get_NB32(dev, reg);
3280 Dramlimit = val >> (16 + 3);
3282 /* set base/limit to F1x120/124 per Node */
3283 if (pDCTstat->NodePresent) {
3284 reg = 0x120; /* F1x120,DramBase[47:27] */
3285 val = Get_NB32(devx, reg);
3288 Set_NB32(devx, reg, val);
3291 val = Get_NB32(devx, reg);
3294 Set_NB32(devx, reg, val);
3296 if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
3298 val = Get_NB32(devx, reg);
3299 val |= (1 << DramMemHoistValid);
3300 val &= ~(0xFF << 24);
3301 dword = (pMCTstat->HoleBase >> (24 - 8)) & 0xFF;
3304 Set_NB32(devx, reg, val);
/* Tri-state unused chip-select pins (when motherboard termination exists).
 * For registered DIMMs each present CS also keeps its odd partner enabled;
 * the inverted mask is then written via the DCT additional-data port. */
3311 static void SetCSTriState(struct MCTStatStruc *pMCTstat,
3312 struct DCTStatStruc *pDCTstat, u8 dct)
3315 u32 dev = pDCTstat->dev_dct;
3316 u32 index_reg = 0x98 + 0x100 * dct;
3321 /* Tri-state unused chipselects when motherboard
3322 termination is available */
3324 // FIXME: skip for Ax
3326 word = pDCTstat->CSPresent;
3327 if (pDCTstat->Status & (1 << SB_Registered)) {
3328 for (cs = 0; cs < 8; cs++) {
3329 if (word & (1 << cs)) {
3331 word |= 1 << (cs + 1);
3335 word = (~word) & 0xFF;
3337 val = Get_NB32_index_wait(dev, index_reg, index);
3339 Set_NB32_index_wait(dev, index_reg, index, val);
/* Tri-state unused CKE pins (when motherboard termination exists).
 * NOTE(review): the (word & 0x00FF)==1 / (word>>8)==1 tests read a value
 * built in elided lines — per-CKE rank counts, presumably; verify against
 * the full source. */
3344 static void SetCKETriState(struct MCTStatStruc *pMCTstat,
3345 struct DCTStatStruc *pDCTstat, u8 dct)
3349 u32 index_reg = 0x98 + 0x100 * dct;
3354 /* Tri-state unused CKEs when motherboard termination is available */
3356 // FIXME: skip for Ax
3358 dev = pDCTstat->dev_dct;
3360 for (cs = 0; cs < 8; cs++) {
3361 if (pDCTstat->CSPresent & (1 << cs)) {
3370 val = Get_NB32_index_wait(dev, index_reg, index);
3371 if ((word & 0x00FF) == 1)
3376 if ((word >> 8) == 1)
3381 Set_NB32_index_wait(dev, index_reg, index, val);
/* Tri-state unused ODT pins: start with all four tri-stated (0x0F) and
 * clear a bit per populated CS pair; on quad-rank-capable platforms the
 * additional ODT pin for the odd rank is cleared too. */
3385 static void SetODTTriState(struct MCTStatStruc *pMCTstat,
3386 struct DCTStatStruc *pDCTstat, u8 dct)
3390 u32 index_reg = 0x98 + 0x100 * dct;
3396 // FIXME: skip for Ax
3398 dev = pDCTstat->dev_dct;
3400 /* Tri-state unused ODTs when motherboard termination is available */
3401 max_dimms = (u8) mctGet_NVbits(NV_MAX_DIMMS);
3402 odt = 0x0F; /* tristate all the pins then clear the used ones. */
3404 for (cs = 0; cs < 8; cs += 2) {
3405 if (pDCTstat->CSPresent & (1 << cs)) {
3406 odt &= ~(1 << (cs / 2));
3408 /* if quad-rank capable platform clear adtitional pins */
3409 if (max_dimms != MAX_CS_SUPPORTED) {
3410 if (pDCTstat->CSPresent & (1 << (cs + 1)))
3411 odt &= ~(4 << (cs / 2));
3417 val = Get_NB32_index_wait(dev, index_reg, index);
3419 Set_NB32_index_wait(dev, index_reg, index, val);
/* Program DRAM phy drive/slew compensation: build a packed value from the
 * rise/fall slew lookup tables (indexed by fields of additional-data reg
 * 0x00) and write it to additional-data reg 0x0a, with a special-case
 * override for unganged 4-DIMM channels at DDR533/667. */
3424 static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
3425 struct DCTStatStruc *pDCTstat, u8 dct)
3428 u32 index_reg = 0x98 + 0x100 * dct;
3429 u32 dev = pDCTstat->dev_dct;
3435 val = Get_NB32_index_wait(dev, index_reg, 0x00);
3437 for (i=0; i < 6; i++) {
3441 p = Table_Comp_Rise_Slew_15x;
3442 valx = p[(val >> 16) & 3];
3446 p = Table_Comp_Fall_Slew_15x;
3447 valx = p[(val >> 16) & 3];
3450 p = Table_Comp_Rise_Slew_20x;
3451 valx = p[(val >> 8) & 3];
3454 p = Table_Comp_Fall_Slew_20x;
3455 valx = p[(val >> 8) & 3];
/* Each of the six lookups occupies a 5-bit field in the result. */
3459 dword |= valx << (5 * i);
3462 /* Override/Exception */
3463 if (!pDCTstat->GangedMode) {
3464 i = 0; /* use i for the dct setting required */
3465 if (pDCTstat->MAdimms[0] < 4)
3467 if (((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) && (pDCTstat->MAdimms[i] == 4)) {
3468 dword &= 0xF18FFF18;
3469 index_reg = 0x98; /* force dct = 0 */
3473 Set_NB32_index_wait(dev, index_reg, 0x0a, dword);
3477 static void WaitRoutine_D(u32 time)
/* Set EarlyArbEn in F2x[1,0]78 when the NB-clock : MEMCLK ratio is in the
 * range where early arbitration avoids bubbles (GH enhancement #18429). */
3486 static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
3487 struct DCTStatStruc *pDCTstat)
3491 u32 dev = pDCTstat->dev_dct;
3493 /* GhEnhancement #18429 modified by askar: For low NB CLK :
3494 * Memclk ratio, the DCT may need to arbitrate early to avoid
3495 * unnecessary bubbles.
3496 * bit 19 of F2x[1,0]78 Dram Control Register, set this bit only when
3497 * NB CLK : Memclk ratio is between 3:1 (inclusive) to 4:5 (inclusive)
3501 val = Get_NB32(dev, reg);
3503 //FIXME: check for Cx
3504 if (CheckNBCOFEarlyArbEn(pMCTstat, pDCTstat))
3505 val |= (1 << EarlyArbEn);
3507 Set_NB32(dev, reg, val);
/* Compute the NB-clock : MEMCLK ratio and return 1 when it lies in
 * [3:1, 4.5:1], the window in which EarlyArbEn should be set. */
3512 static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
3513 struct DCTStatStruc *pDCTstat)
3519 u32 dev = pDCTstat->dev_dct;
3523 /* Check if NB COF >= 4*Memclk, if it is not, return a fatal error
3526 /* 3*(Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
3527 _RDMSR(0xC0010071, &lo, &hi);
3533 val = Get_NB32(dev, reg);
3534 if (!(val & (1 << MemClkFreqVal)))
/* NOTE(review): 'reg * 0x100' looks like a typo for 'reg + 0x100' —
 * every other DCT1 access in this file uses a +0x100 offset.  Cannot be
 * fixed here without the elided context; confirm against full source. */
3535 val = Get_NB32(dev, reg * 0x100); /* get the DCT1 value */
3543 dev = pDCTstat->dev_nbmisc;
3545 val = Get_NB32(dev, reg);
3553 // Yes this could be nicer but this was how the asm was....
3554 if (val < 3) { /* NClk:MemClk < 3:1 */
3556 } else if (val > 4) { /* NClk:MemClk >= 5:1 */
3558 } else if ((val == 4) && (rem > tmp)) { /* NClk:MemClk > 4.5:1 */
3561 return 1; /* 3:1 <= NClk:MemClk <= 4.5:1*/
/* Zero the MCT status structure and each node's DCT structure, preserving
 * only the HostBiosSrvc1/2 fields and the byte ranges outside
 * [start, CH_MaxRdLat[2]) and [CH_D_BC_RCVRDLY[2][4], end). */
3566 static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
3567 struct DCTStatStruc *pDCTstatA)
3571 struct DCTStatStruc *pDCTstat;
3574 u16 host_serv1, host_serv2;
3576 /* Initialize Data structures by clearing all entries to 0 */
3577 p = (u8 *) pMCTstat;
3578 for (i = 0; i < sizeof(struct MCTStatStruc); i++) {
3582 for (Node = 0; Node < 8; Node++) {
3583 pDCTstat = pDCTstatA + Node;
3584 host_serv1 = pDCTstat->HostBiosSrvc1;
3585 host_serv2 = pDCTstat->HostBiosSrvc2;
3587 p = (u8 *) pDCTstat;
/* offsetof-style computation via a null pointer cast (pre-C99 idiom). */
3589 stop = (u32)(&((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
3590 for (i = start; i < stop ; i++) {
3594 start = (u32)(&((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
3595 stop = sizeof(struct DCTStatStruc);
3596 for (i = start; i < stop; i++) {
3599 pDCTstat->HostBiosSrvc1 = host_serv1;
3600 pDCTstat->HostBiosSrvc2 = host_serv2;
/* Pre-DRAM-init phy fixup for DDR533/667: write a fixed sequence to the
 * additional-data index/data ports (0x98/0x9C) of both DCTs. */
3605 static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
3606 struct DCTStatStruc *pDCTstat)
3610 u32 dev = pDCTstat->dev_dct;
3612 // FIXME: skip for Ax
3613 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
3614 for (i=0; i < 2; i++) {
3615 reg_off = 0x100 * i;
3616 Set_NB32(dev, 0x98 + reg_off, 0x0D000030);
3617 Set_NB32(dev, 0x9C + reg_off, 0x00000806);
3618 Set_NB32(dev, 0x98 + reg_off, 0x4D040F30);
/* Adjust the DQS position search range at DDR533/667; the actual
 * adjustment of *dqs_pos is in elided lines. */
3624 static void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
3625 struct DCTStatStruc *pDCTstat, u8 *dqs_pos)
3627 // FIXME: Skip for Ax
3628 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
/* Set the ClLinesToNbDis MSR bit (cache lines to NB disable) — part of
 * the training-time workaround applied by mct_InitialMCT_D. */
3634 void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat,
3635 struct DCTStatStruc *pDCTstat)
3640 // FIXME: Maybe check the CPUID? - not for now.
3641 // pDCTstat->LogicalCPUID;
3644 _RDMSR(msr, &lo, &hi);
3645 lo |= 1 << ClLinesToNbDis;
3646 _WRMSR(msr, lo, hi);
/* Clear ClLinesToNbDis again — unless ClToNB_flag says the bit was
 * already set before training and should be left alone. */
3650 void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
3651 struct DCTStatStruc *pDCTstat)
3657 // FIXME: Maybe check the CPUID? - not for now.
3658 // pDCTstat->LogicalCPUID;
3661 _RDMSR(msr, &lo, &hi);
3662 if (!pDCTstat->ClToNB_flag)
3663 lo &= ~(1<<ClLinesToNbDis);
3664 _WRMSR(msr, lo, hi);
/* Set the WbEnhWsbDis_D MSR bit (high half) during memory training. */
3669 void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3670 struct DCTStatStruc *pDCTstat)
3675 // FIXME: Maybe check the CPUID? - not for now.
3676 // pDCTstat->LogicalCPUID;
3679 _RDMSR(msr, &lo, &hi);
3680 hi |= (1 << WbEnhWsbDis_D);
3681 _WRMSR(msr, lo, hi);
/* Clear the WbEnhWsbDis_D MSR bit after training (see mct_FinalMCT_D). */
3685 void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3686 struct DCTStatStruc *pDCTstat)
3691 // FIXME: Maybe check the CPUID? - not for now.
3692 // pDCTstat->LogicalCPUID;
3695 _RDMSR(msr, &lo, &hi);
3696 hi &= ~(1 << WbEnhWsbDis_D);
3697 _WRMSR(msr, lo, hi);
/* Write DRAM Configuration High (F2x[1,0]94) with the Bug#15114 /
 * Erratum 177 wrapper: disable phy auto-compensation, reset the begin-
 * compensation bits, change frequency, then re-enable compensation. */
3701 void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct,
3704 /* Bug#15114: Comp. update interrupted by Freq. change can cause
3705 * subsequent update to be invalid during any MemClk frequency change:
3706 * Solution: From the bug report:
3707 * 1. A software-initiated frequency change should be wrapped into the
3708 * following sequence :
3709 * - a) Disable Compensation (F2[1, 0]9C_x08[30] )
3710 * b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
3711 * c) Do frequency change
3712 * d) Enable Compensation (F2[1, 0]9C_x08[30] )
3713 * 2. A software-initiated Disable Compensation should always be
3714 * followed by step b) of the above steps.
3715 * Silicon Status: Fixed In Rev B0
3717 * Errata#177: DRAM Phy Automatic Compensation Updates May Be Invalid
3718 * Solution: BIOS should disable the phy automatic compensation prior
3719 * to initiating a memory clock frequency change as follows:
3720 * 1. Disable PhyAutoComp by writing 1'b1 to F2x[1, 0]9C_x08[30]
3721 * 2. Reset the Begin Compensation bits by writing 32'h0 to
3722 * F2x[1, 0]9C_x4D004F00
3723 * 3. Perform frequency change
3724 * 4. Enable PhyAutoComp by writing 1'b0 to F2x[1, 0]9C_08[30]
3725 * In addition, any time software disables the automatic phy
3726 * compensation it should reset the begin compensation bit per step 2.
3727 * Silicon Status: Fixed in DR-B0
3730 u32 dev = pDCTstat->dev_dct;
3731 u32 index_reg = 0x98 + 0x100 * dct;
/* Step 1: set DisAutoComp. */
3737 val = Get_NB32_index_wait(dev, index_reg, index);
3738 Set_NB32_index_wait(dev, index_reg, index, val | (1 << DisAutoComp));
3740 //FIXME: check for Bx Cx CPU
3741 // if Ax mct_SetDramConfigHi_Samp_D
/* Step 2: reset the begin-compensation bits in the phy debug space. */
3744 index = 0x4D014F00; /* F2x[1, 0]9C_x[D0FFFFF:D000000] DRAM Phy Debug Registers */
3745 index |= 1 << DctAccessWrite;
3747 Set_NB32_index_wait(dev, index_reg, index, val);
/* Step 3: the actual frequency/config change. */
3749 Set_NB32(dev, 0x94 + 0x100 * dct, DramConfigHi);
/* Step 4: clear DisAutoComp to re-enable compensation. */
3752 val = Get_NB32_index_wait(dev, index_reg, index);
3753 Set_NB32_index_wait(dev, index_reg, index, val & (~(1 << DisAutoComp)));
/* Pre-DQS-training fixups (Bug#15115 RdPtrInit, Bug#15880 WrDqs reset):
 * per node, run the sample fixup and reset both DCTs' DLLs. */
3756 static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
3757 struct DCTStatStruc *pDCTstatA)
3760 struct DCTStatStruc *pDCTstat;
3764 * Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
3766 * Solution: BIOS should program DRAM Control Register[RdPtrInit] =
3767 * 5h, (F2x[1, 0]78[3:0] = 5h).
3768 * Silicon Status: Fixed In Rev B0
3770 * Bug#15880: Determine validity of reset settings for DDR PHY timing.
3771 * Solutiuon: At least, set WrDqs fine delay to be 0 for DDR2 training.
3774 for (Node = 0; Node < 8; Node++) {
3775 pDCTstat = pDCTstatA + Node;
/* NOTE(review): as written, only the first call is guarded by the
 * NodePresent test; the indentation suggests the two mct_ResetDLL_D
 * calls were meant to be inside the same if — missing braces?
 * Confirm against the full source before changing. */
3777 if (pDCTstat->NodePresent)
3778 mct_BeforeDQSTrain_Samp_D(pMCTstat, pDCTstat);
3779 mct_ResetDLL_D(pMCTstat, pDCTstat, 0);
3780 mct_ResetDLL_D(pMCTstat, pDCTstat, 1);
/* Reset the DDR phy DLL for each enabled receiver pair on one DCT by
 * pulsing F2x[1,0]9C_xD080F0C (write 0x8000, wait, write 0).  Skipped on
 * B3 silicon.  HWCR.wrap32dis is set around the sequence so the 64-bit
 * cache-fill reads work in 32-bit mode, then restored. */
3785 static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
3786 struct DCTStatStruc *pDCTstat, u8 dct)
3789 u32 dev = pDCTstat->dev_dct;
3790 u32 reg_off = 0x100 * dct;
3796 /* Skip reset DLL for B3 */
3797 if (pDCTstat->LogicalCPUID & AMD_DR_B3) {
3802 _RDMSR(addr, &lo, &hi);
3803 if(lo & (1<<17)) { /* save the old value */
3806 lo |= (1<<17); /* HWCR.wrap32dis */
3807 lo &= ~(1<<15); /* SSEDIS */
3808 /* Setting wrap32dis allows 64-bit memory references in 32bit mode */
3809 _WRMSR(addr, lo, hi);
3812 pDCTstat->Channel = dct;
3813 Receiver = mct_InitReceiver_D(pDCTstat, dct);
3814 /* there are four receiver pairs, loosely associated with chipselects.*/
3815 for (; Receiver < 8; Receiver += 2) {
3816 if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver)) {
3817 addr = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, dct, Receiver, &valid);
3819 mct_Read1LTestPattern_D(pMCTstat, pDCTstat, addr); /* cache fills */
3821 /* Write 0000_8000h to register F2x[1,0]9C_xD080F0C */
3822 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00008000);
3823 mct_Wait(80); /* wait >= 300ns */
3825 /* Write 0000_0000h to register F2x[1,0]9C_xD080F0C */
3826 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00000000);
3827 mct_Wait(800); /* wait >= 2us */
3834 _RDMSR(addr, &lo, &hi);
3835 lo &= ~(1<<17); /* restore HWCR.wrap32dis */
3836 _WRMSR(addr, lo, hi);
/* Enable channel data interleaving (F2x110[DctDatIntlv]) in unganged mode
 * and set DisDatMask in the NB Configuration High register. */
3841 static void mct_EnableDatIntlv_D(struct MCTStatStruc *pMCTstat,
3842 struct DCTStatStruc *pDCTstat)
3844 u32 dev = pDCTstat->dev_dct;
3847 /* Enable F2x110[DctDatIntlv] */
3848 // Call back not required mctHookBeforeDatIntlv_D()
3849 // FIXME Skip for Ax
3850 if (!pDCTstat->GangedMode) {
3851 val = Get_NB32(dev, 0x110);
3852 val |= 1 << 5; // DctDatIntlv
3853 Set_NB32(dev, 0x110, val);
3855 // FIXME Skip for Cx
3856 dev = pDCTstat->dev_nbmisc;
3857 val = Get_NB32(dev, 0x8C); // NB Configuration Hi
3858 val |= 1 << (36-32); // DisDatMask
3859 Set_NB32(dev, 0x8C, val);
/* Set F2x78[ChSetupSync] when exactly one DCT has all-zero
 * AddrCmdSetup/CsOdtSetup/CkeSetup fields and the other does not
 * (mask 0x0202020 selects those three setup bits in CH_ADDR_TMG). */
3864 static void mct_SetupSync_D(struct MCTStatStruc *pMCTstat,
3865 struct DCTStatStruc *pDCTstat)
3867 /* set F2x78[ChSetupSync] when F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup,
3868 * CkeSetup] setups for one DCT are all 0s and at least one of the setups,
3869 * F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup, CkeSetup], of the other
3873 u32 dev = pDCTstat->dev_dct;
3876 cha = pDCTstat->CH_ADDR_TMG[0] & 0x0202020;
3877 chb = pDCTstat->CH_ADDR_TMG[1] & 0x0202020;
3879 if ((cha != chb) && ((cha == 0) || (chb == 0))) {
3880 val = Get_NB32(dev, 0x78);
3882 Set_NB32(dev, 0x78, val);
/* Post-DRAM-init fixup for B2/B3 silicon: after a 50 us wait, if
 * DramEnabled is still observed, temporarily clear Width128, do a dummy
 * additional-data read, and restore Width128 when in ganged mode. */
3887 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
3890 u32 reg_off = 0x100 * dct;
3891 u32 dev = pDCTstat->dev_dct;
3893 if (pDCTstat->LogicalCPUID & (AMD_DR_B2 | AMD_DR_B3)) {
3894 mct_Wait(10000); /* Wait 50 us*/
3895 val = Get_NB32(dev, 0x110);
3896 if ( val & (1 << DramEnabled)) {
3897 /* If 50 us expires while DramEnable =0 then do the following */
3898 val = Get_NB32(dev, 0x90 + reg_off);
3899 val &= ~(1 << Width128); /* Program Width128 = 0 */
3900 Set_NB32(dev, 0x90 + reg_off, val);
3902 val = Get_NB32_index_wait(dev, 0x98 + reg_off, 0x05); /* Perform dummy CSR read to F2x09C_x05 */
3904 if (pDCTstat->GangedMode) {
3905 val = Get_NB32(dev, 0x90 + reg_off);
3906 val |= 1 << Width128; /* Program Width128 = 1 (restore ganged width) */
3907 Set_NB32(dev, 0x90 + reg_off, val);
3914 /* ==========================================================
3915 * 6-bit Bank Addressing Table
3918 * CCC=Columns-9 binary
3919 * ==========================================================
3920 * DCT CCCBRR Rows Banks Columns 64-bit CS Size
3922 * 0000 000000 13 2 9 128MB
3923 * 0001 001000 13 2 10 256MB
3924 * 0010 001001 14 2 10 512MB
3925 * 0011 010000 13 2 11 512MB
3926 * 0100 001100 13 3 10 512MB
3927 * 0101 001101 14 3 10 1GB
3928 * 0110 010001 14 2 11 1GB
3929 * 0111 001110 15 3 10 2GB
3930 * 1000 010101 14 3 11 2GB
3931 * 1001 010110 15 3 11 4GB
3932 * 1010 001111 16 3 10 4GB
3933 * 1011 010111 16 3 11 8GB