2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 /* Description: Main memory controller system configuration for DDR 2 */
23 /* KNOWN ISSUES - ERRATA
25 * Trtp is not calculated correctly when the controller is in 64-bit mode, it
26 * is 1 busclock off. No fix planned. The controller is not ordinarily in
29 * 32 Byte burst not supported. No fix planned. The controller is not
30 * ordinarily in 64-bit mode.
32 * Trc precision does not use extra Jedec defined fractional component.
 * Instead Trc (coarse) is rounded up to nearest 1 ns.
35 * Mini and Micro DIMM not supported. Only RDIMM, UDIMM, SO-DIMM defined types
39 static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
40 struct DCTStatStruc *pDCTstatA);
41 static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
42 struct DCTStatStruc *pDCTstatA);
43 static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
44 struct DCTStatStruc *pDCTstatA);
45 static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
46 struct DCTStatStruc *pDCTstatA);
47 static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
48 struct DCTStatStruc *pDCTstatA);
49 static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
50 struct DCTStatStruc *pDCTstat);
51 static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
52 struct DCTStatStruc *pDCTstat);
53 static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
54 struct DCTStatStruc *pDCTstatA);
55 static u8 NodePresent_D(u8 Node);
56 static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
57 struct DCTStatStruc *pDCTstatA);
58 static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
59 struct DCTStatStruc *pDCTstat, u8 dct);
60 static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
61 struct DCTStatStruc *pDCTstat, u8 dct);
62 static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
63 struct DCTStatStruc *pDCTstat, u8 dct);
64 static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
65 struct DCTStatStruc *pDCTstat);
66 static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
67 struct DCTStatStruc *pDCTstat, u8 dct);
68 static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
69 struct DCTStatStruc *pDCTstat, u8 dct);
70 static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
71 struct DCTStatStruc *pDCTstat, u8 dct);
72 static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
73 struct DCTStatStruc *pDCTstat, u8 dct);
74 static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
75 struct DCTStatStruc *pDCTstat, u8 dct);
76 static u8 Get_DefTrc_k_D(u8 k);
77 static u16 Get_40Tk_D(u8 k);
78 static u16 Get_Fk_D(u8 k);
79 static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat, u8 i, u8 j, u8 k);
80 static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
81 struct DCTStatStruc *pDCTstat, int j, int k);
82 static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i);
83 static void mct_initDCT(struct MCTStatStruc *pMCTstat,
84 struct DCTStatStruc *pDCTstat);
85 static void mct_DramInit(struct MCTStatStruc *pMCTstat,
86 struct DCTStatStruc *pDCTstat, u8 dct);
87 static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
88 struct DCTStatStruc *pDCTstat, u8 dct);
89 static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat);
90 static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
91 struct DCTStatStruc *pDCTstat, u8 dct);
92 static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
93 struct DCTStatStruc *pDCTstat, u8 dct);
94 static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,\
95 struct DCTStatStruc *pDCTstat, u8 dct);
96 static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
97 struct DCTStatStruc *pDCTstat, u8 dct);
98 static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
99 struct DCTStatStruc *pDCTstat, u8 dct);
100 static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
101 struct DCTStatStruc *pDCTstat, u8 dct);
102 static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
103 struct DCTStatStruc *pDCTstat, u8 dct);
104 static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
105 struct DCTStatStruc *pDCTstat, u8 dct);
106 static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
107 struct DCTStatStruc *pDCTstat, u8 dct);
108 static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
109 struct DCTStatStruc *pDCTstat);
110 static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
111 u32 dev, u32 index_reg, u32 index);
112 static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
113 u32 dev, u32 index_reg);
114 static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
115 u32 dev, u32 index_reg);
116 static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
117 u32 dev, u32 index_reg, u32 index);
118 static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
119 struct DCTStatStruc *pDCTstat);
120 static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat, u8 dct,
121 u32 dev, u32 index_reg, u32 index);
122 static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat,
123 struct DCTStatStruc *pDCTstat);
124 static void mct_init(struct MCTStatStruc *pMCTstat,
125 struct DCTStatStruc *pDCTstat);
126 static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
127 struct DCTStatStruc *pDCTstat);
128 static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
129 struct DCTStatStruc *pDCTstatA);
130 static void SetCSTriState(struct MCTStatStruc *pMCTstat,
131 struct DCTStatStruc *pDCTstat, u8 dct);
132 static void SetODTTriState(struct MCTStatStruc *pMCTstat,
133 struct DCTStatStruc *pDCTstat, u8 dct);
134 static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
135 struct DCTStatStruc *pDCTstat, u8 dct);
136 static u32 mct_NodePresent_D(void);
137 static void WaitRoutine_D(u32 time);
138 static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
139 struct DCTStatStruc *pDCTstatA);
140 static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
141 struct DCTStatStruc *pDCTstatA);
142 static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
143 struct DCTStatStruc *pDCTstat);
144 static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
145 struct DCTStatStruc *pDCTstat);
146 void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
147 struct DCTStatStruc *pDCTstat);
148 static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
149 struct DCTStatStruc *pDCTstat);
150 void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
151 struct DCTStatStruc *pDCTstat);
152 static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
153 struct DCTStatStruc *pDCTstatA);
154 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct);
155 static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
156 struct DCTStatStruc *pDCTstat, u8 dct);
157 static u32 mct_DisDllShutdownSR(struct MCTStatStruc *pMCTstat,
158 struct DCTStatStruc *pDCTstat, u32 DramConfigLo, u8 dct);
159 static void mct_EnDllShutdownSR(struct MCTStatStruc *pMCTstat,
160 struct DCTStatStruc *pDCTstat, u8 dct);
/*See mctAutoInitMCT header for index relationships to CL and T*/
/* In these tables index k is the memclock-speed code and index j is the
 * CAS-latency code (see the Tab_*_k / Tab_*_j suffixes).  Entry 0 of the
 * k-indexed tables is an unused placeholder. */
static const u16 Table_F_k[] = {00,200,266,333,400,533 };
static const u8 Table_T_k[] = {0x00,0x50,0x3D,0x30,0x25, 0x18 };
static const u8 Table_CL2_j[] = {0x04,0x08,0x10,0x20,0x40, 0x80 };
/* Default Trc per speed code, used when the SPD Trc byte is 0/0xFF
 * (see the SB_NoTrcTrfc fallback in AutoCycTiming_D). */
static const u8 Tab_defTrc_k[] = {0x0,0x41,0x3C,0x3C,0x3A, 0x3A };
/* 40 x bus clock period per speed code; divisor for the "prescaled by 4"
 * SPD-to-busclock conversions described in AutoCycTiming_D. */
static const u16 Tab_40T_k[] = {00,200,150,120,100,75 };
static const u8 Tab_TrefT_k[] = {00,0,1,1,2,2,3,4,5,6,0,0};
static const u8 Tab_BankAddr[] = {0x0,0x08,0x09,0x10,0x0C,0x0D,0x11,0x0E,0x15,0x16,0x0F,0x17};
static const u8 Tab_tCL_j[] = {0,2,3,4,5};
/* Tfaw in busclocks per speed code, for 1KB and 2KB page sizes. */
static const u8 Tab_1KTfawT_k[] = {00,8,10,13,14,20};
static const u8 Tab_2KTfawT_k[] = {00,10,14,17,18,24};
static const u8 Tab_L1CLKDis[] = {8,8,6,4,2,0,8,8};
static const u8 Tab_M2CLKDis[] = {2,0,8,8,2,0,2,0};
static const u8 Tab_S1CLKDis[] = {8,0,8,8,8,0,8,0};
/* Phy compensation slew-rate tables; 0xFF terminates each list. */
static const u8 Table_Comp_Rise_Slew_20x[] = {7, 3, 2, 2, 0xFF};
static const u8 Table_Comp_Rise_Slew_15x[] = {7, 7, 3, 2, 0xFF};
static const u8 Table_Comp_Fall_Slew_20x[] = {7, 5, 3, 2, 0xFF};
static const u8 Table_Comp_Fall_Slew_15x[] = {7, 7, 5, 3, 0xFF};
/* Top-level DDR2 memory auto-initialization entry point: per-node setup,
 * DCT init, HT memory map, CPU memory typing, DQS training, other-timing
 * programming, node/channel interleave, ECC setup and memory clear. */
static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
	 * Memory may be mapped contiguously all the way up to 4GB (depending
	 * on setup options). It is the responsibility of PCI subsystem to
	 * create an uncacheable IO region below 4GB and to adjust TOP_MEM
	 * downward prior to any IO mapping or accesses. It is the same
	 * responsibility of the CPU sub-system prior to accessing LAPIC.
	 * Slot Number is an external convention, and is determined by OEM with
	 * accompanying silk screening. OEM may choose to use Slot number
	 * convention which is consistent with DIMM number conventions.
	 * All AMD engineering
	 * Run-Time Requirements:
	 * 1. Complete Hypertransport Bus Configuration
	 * 2. SMBus Controller Initialized
	 * 3. Checksummed or Valid NVRAM bits
	 * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
	 * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to
	 * 6. All var MTRRs reset to zero
	 * 7. State of NB_CFG.DisDatMsk set properly on all CPUs
	 * 8. All CPUs at 2Ghz Speed (unless DQS training is not installed).
	 * 9. All cHT links at max Speed/Width (unless DQS training is not
	 * Global relationship between index values and item values:
	 * --------------------------
	mctInitMemGPIOs_A_D();		/* Set any required GPIOs*/
	/* Per-node initialization pass: discover each node, then bring up
	 * its DRAM controllers while tracking the running system base. */
	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
		struct DCTStatStruc *pDCTstat;
		pDCTstat = pDCTstatA + Node;
		pDCTstat->Node_ID = Node;
		/* Cache the PCI device handles for this node's functions. */
		pDCTstat->dev_host = PA_HOST(Node);
		pDCTstat->dev_map = PA_MAP(Node);
		pDCTstat->dev_dct = PA_DCT(Node);
		pDCTstat->dev_nbmisc = PA_NBMISC(Node);
		pDCTstat->NodeSysBase = node_sys_base;
		print_tx("mctAutoInitMCT_D: mct_init Node ", Node);
		mct_init(pMCTstat, pDCTstat);
		mctNodeIDDebugPort_D();
		pDCTstat->NodePresent = NodePresent_D(Node);
		if (pDCTstat->NodePresent) {		/* See if Node is there*/
			print_t("mctAutoInitMCT_D: clear_legacy_Mode\n");
			clear_legacy_Mode(pMCTstat, pDCTstat);
			pDCTstat->LogicalCPUID = mctGetLogicalCPUID_D(Node);
			print_t("mctAutoInitMCT_D: mct_InitialMCT_D\n");
			mct_InitialMCT_D(pMCTstat, pDCTstat);
			print_t("mctAutoInitMCT_D: mctSMBhub_Init\n");
			mctSMBhub_Init(Node);		/* Switch SMBUS crossbar to proper node*/
			print_t("mctAutoInitMCT_D: mct_initDCT\n");
			mct_initDCT(pMCTstat, pDCTstat);
			if (pDCTstat->ErrCode == SC_FatalErr) {
				goto fatalexit;		/* any fatal errors?*/
			} else if (pDCTstat->ErrCode < SC_StopError) {
		}	/* if Node present */
		/* Advance system base past this node's memory, rounded up. */
		node_sys_base = pDCTstat->NodeSysBase;
		node_sys_base += (pDCTstat->NodeSysLimit + 2) & ~0x0F;
	if (NodesWmem == 0) {
		print_debug("No Nodes?!\n");
	print_t("mctAutoInitMCT_D: SyncDCTsReady_D\n");
	SyncDCTsReady_D(pMCTstat, pDCTstatA);	/* Make sure DCTs are ready for accesses.*/
	print_t("mctAutoInitMCT_D: HTMemMapInit_D\n");
	HTMemMapInit_D(pMCTstat, pDCTstatA);	/* Map local memory into system address space.*/
	print_t("mctAutoInitMCT_D: CPUMemTyping_D\n");
	CPUMemTyping_D(pMCTstat, pDCTstatA);	/* Map dram into WB/UC CPU cacheability */
	mctHookAfterCPU();			/* Setup external northbridge(s) */
	print_t("mctAutoInitMCT_D: DQSTiming_D\n");
	DQSTiming_D(pMCTstat, pDCTstatA);	/* Get Receiver Enable and DQS signal timing*/
	print_t("mctAutoInitMCT_D: UMAMemTyping_D\n");
	UMAMemTyping_D(pMCTstat, pDCTstatA);	/* Fix up for UMA sizing */
	print_t("mctAutoInitMCT_D: :OtherTiming\n");
	mct_OtherTiming(pMCTstat, pDCTstatA);
	if (ReconfigureDIMMspare_D(pMCTstat, pDCTstatA)) { /* RESET# if 1st pass of DIMM spare enabled*/
	InterleaveNodes_D(pMCTstat, pDCTstatA);
	InterleaveChannels_D(pMCTstat, pDCTstatA);
	print_t("mctAutoInitMCT_D: ECCInit_D\n");
	if (ECCInit_D(pMCTstat, pDCTstatA)) {		/* Setup ECC control and ECC check-bits*/
		print_t("mctAutoInitMCT_D: MCTMemClr_D\n");
		MCTMemClr_D(pMCTstat,pDCTstatA);
	mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) );	// Node 0
	print_t("All Done\n");
	/* fatalexit path: unrecoverable error reported by a DCT. */
	die("mct_d: fatalexit");
/* Handle DIMM-spare reconfiguration after training.  Per the caller, the
 * return value requests a RESET# on the first pass when DIMM spare is
 * enabled.  The no-warm-reset path reloads saved DQS timings and resets
 * the data structures; the warm-reset path depends on NV_DQSTrainCTL. */
static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
	if (mctGet_NVbits(NV_CS_SpareCTL)) {	/* spare chip-select enabled in NVRAM? */
		if (MCT_DIMM_SPARE_NO_WARM) {
			/* Do no warm-reset DIMM spare */
			if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
				LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);
			/* First pass: remember that spare-no-warm is active. */
			mct_ResetDataStruct_D(pMCTstat, pDCTstatA);
			pMCTstat->GStatus |= 1 << GSB_EnDIMMSpareNW;
			/* Do warm-reset DIMM spare */
			if (mctGet_NVbits(NV_DQSTrainCTL))
/* Perform DQS signal training for all nodes: fence training, receiver
 * enable training, DQS position training, ECC receiver enable setup, then
 * memory clear.  When training was already done (DIMM-spare no-warm path)
 * the saved values are reloaded from NVRAM instead of retraining. */
static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstatA)
	if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
	nv_DQSTrainCTL = mctGet_NVbits(NV_DQSTrainCTL);
	/* FIXME: BOZO- DQS training every time*/
	print_t("DQSTiming_D: mct_BeforeDQSTrain_D:\n");
	mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);
	phyAssistedMemFnceTraining(pMCTstat, pDCTstatA);
	if (nv_DQSTrainCTL) {
		/* Full hardware training path. */
		mctHookBeforeAnyTraining(pMCTstat, pDCTstatA);
		print_t("DQSTiming_D: TrainReceiverEn_D FirstPass:\n");
		TrainReceiverEn_D(pMCTstat, pDCTstatA, FirstPass);
		print_t("DQSTiming_D: mct_TrainDQSPos_D\n");
		mct_TrainDQSPos_D(pMCTstat, pDCTstatA);
		// Second Pass never used for Barcelona!
		//print_t("DQSTiming_D: TrainReceiverEn_D SecondPass:\n");
		//TrainReceiverEn_D(pMCTstat, pDCTstatA, SecondPass);
		print_t("DQSTiming_D: mctSetEccDQSRcvrEn_D\n");
		mctSetEccDQSRcvrEn_D(pMCTstat, pDCTstatA);
		print_t("DQSTiming_D: TrainMaxReadLatency_D\n");
		//FIXME - currently uses calculated value TrainMaxReadLatency_D(pMCTstat, pDCTstatA);
		mctHookAfterAnyTraining();
		mctSaveDQSSigTmg_D();	/* persist trained values for later reload */
		print_t("DQSTiming_D: mct_EndDQSTraining_D\n");
		mct_EndDQSTraining_D(pMCTstat, pDCTstatA);
		print_t("DQSTiming_D: MCTMemClr_D\n");
		MCTMemClr_D(pMCTstat, pDCTstatA);
		/* No-training path: restore previously saved timings. */
		mctGetDQSSigTmg_D();	/* get values into data structure */
		LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);	/* load values into registers.*/
		//mctDoWarmResetMemClr_D();
		MCTMemClr_D(pMCTstat, pDCTstatA);
/* Write previously determined DQS signal timing values from the data
 * structures into the DCT registers of every node with mapped memory:
 * receiver-enable delays, ECC DQS receiver enables, read/write data
 * timing, and MaxRdLatency. */
static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
	u8 Node, Receiver, Channel, Dir, DIMM;
	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
		struct DCTStatStruc *pDCTstat;
		pDCTstat = pDCTstatA + Node;
		if (pDCTstat->DCTSysLimit) {	/* node has mapped memory */
			dev = pDCTstat->dev_dct;
			for (Channel = 0;Channel < 2; Channel++) {
				/* there are four receiver pairs,
				   loosely associated with chipselects.*/
				index_reg = 0x98 + Channel * 0x100;
				for (Receiver = 0; Receiver < 8; Receiver += 2) {
					/* Set Receiver Enable Values */
					mct_SetRcvrEnDly_D(pDCTstat,
						1, /* FinalValue, From stack */
						(Receiver >> 1) * 3 + 0x10, /* Addl_Index */
						2); /* Pass Second Pass ? */
			for (Channel = 0; Channel<2; Channel++) {
				SetEccDQSRcvrEn_D(pDCTstat, Channel);
			for (Channel = 0; Channel < 2; Channel++) {
				index_reg = 0x98 + Channel * 0x100;
				/* NOTE(review): DIMM index stride below depends on speed/rev:
				 * when 400, 533, 667, it will support dimm0/1/2/3,
				 * and set conf for dimm0, hw will copy to dimm1/2/3
				 * set for dimm1, hw will copy to dimm3
				 * Rev A/B only support DIMM0/1 when 800Mhz and above
				 * + 0x100 to next dimm
				 * Rev C support DIMM0/1/2/3 when 800Mhz and above
				 * + 0x100 to next dimm
				 */
				for (DIMM = 0; DIMM < 2; DIMM++) {
					index = 0;	/* CHA Write Data Timing Low */
					if (pDCTstat->Speed >= 4) {
						index = 0x100 * DIMM;	/* 800MHz+: per-DIMM register copies */
					for (Dir=0;Dir<2;Dir++) {//RD/WR
						p = pDCTstat->CH_D_DIR_B_DQS[Channel][DIMM][Dir];
						val = stream_to_int(p);	/* CHA Read Data Timing High */
						Set_NB32_index_wait(dev, index_reg, index+1, val);
						val = stream_to_int(p+4); /* CHA Write Data Timing High */
						Set_NB32_index_wait(dev, index_reg, index+2, val);
						val = *(p+8); /* CHA Write ECC Timing */
						Set_NB32_index_wait(dev, index_reg, index+3, val);
			for (Channel = 0; Channel<2; Channel++) {
				reg = 0x78 + Channel * 0x100;
				val = Get_NB32(dev, reg);
				val |= ((u32) pDCTstat->CH_MaxRdLat[Channel] << 22);
				val &= ~(1<<DqsRcvEnTrain);	/* leave training mode */
				Set_NB32(dev, reg, val);	/* program MaxRdLatency to correspond with current delay*/
static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA);
static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
	/* Clear MC4_STS for all Nodes in the system. This is required in some
	 * circumstances to clear left over garbage from cold reset, shutdown,
	 * or normal ECC memory conditioning.
	 */
	//FIXME: this function depends on pDCTstat Array ( with Node id ) - Is this really a problem?
	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
		struct DCTStatStruc *pDCTstat;
		pDCTstat = pDCTstatA + Node;
		if (pDCTstat->NodePresent) {
			dev = pDCTstat->dev_nbmisc;
			/*MCA NB Status Low (alias to MC4_STS[31:0] */
			Set_NB32(dev, 0x48, 0);
			/* MCA NB Status High (alias to MC4_STS[63:32] */
			Set_NB32(dev, 0x4C, 0);
/* Build the HyperTransport DRAM address map: assign each node's memory a
 * contiguous system address range, optionally creating the memory hole
 * below the bottom of the IO region (F1xF0 Dram Hole Address Register),
 * then copy node 0's DRAM map registers to all other present nodes. */
static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
	u32 NextBase, BottomIO;
	u8 _MemHoleRemap, DramHoleBase, DramHoleOffset;
	u32 HoleSize, DramSelBaseAddr;
	struct DCTStatStruc *pDCTstat;
	_MemHoleRemap = mctGet_NVbits(NV_MemHole);
	/* BottomIO is held in [39:24] granularity (addr >> 24). */
	if (pMCTstat->HoleBase == 0) {
		DramHoleBase = mctGet_NVbits(NV_BottomIO);
		DramHoleBase = pMCTstat->HoleBase >> (24-8);
	BottomIO = DramHoleBase << (24-8);
	pDCTstat = pDCTstatA + 0;
	dev = pDCTstat->dev_map;	/* node 0 map device; source for the copy below */
	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
		pDCTstat = pDCTstatA + Node;
		devx = pDCTstat->dev_map;
		pDCTstat = pDCTstatA + Node;
		if (!pDCTstat->GangedMode) {
			DramSelBaseAddr = pDCTstat->NodeSysLimit - pDCTstat->DCTSysLimit;
			/*In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
			val = pDCTstat->NodeSysLimit;
			if ((val & 0xFF) == 0xFE) {
			pDCTstat->DCTSysLimit = val;
		base = pDCTstat->DCTSysBase;
		limit = pDCTstat->DCTSysLimit;
		DramSelBaseAddr += NextBase;
		printk(BIOS_DEBUG, " Node: %02x base: %02x limit: %02x BottomIO: %02x\n", Node, base, limit, BottomIO);
		if ((base < BottomIO) && (limit >= BottomIO)) {
			/* HW Dram Remap: this node's range straddles BottomIO. */
			pDCTstat->Status |= 1 << SB_HWHole;
			pMCTstat->GStatus |= 1 << GSB_HWHole;
			pDCTstat->DCTSysBase = base;
			pDCTstat->DCTSysLimit = limit;
			pDCTstat->DCTHoleBase = BottomIO;
			pMCTstat->HoleBase = BottomIO;
			HoleSize = _4GB_RJ8 - BottomIO;	/* HoleSize[39:8] */
			if ((DramSelBaseAddr > 0) && (DramSelBaseAddr < BottomIO))
				base = DramSelBaseAddr;
			val = ((base + HoleSize) >> (24-8)) & 0xFF;
			DramHoleOffset = val;
			val <<= 8;	/* shl 16, rol 24 */
			val |= DramHoleBase << 24;
			val |= 1 << DramHoleValid;
			Set_NB32(devx, 0xF0, val);	/* Dram Hole Address Reg */
			pDCTstat->DCTSysLimit += HoleSize;
			base = pDCTstat->DCTSysBase;
			limit = pDCTstat->DCTSysLimit;
		} else if (base == BottomIO) {
			/* SW node hole: node starts exactly at the hole base. */
			pMCTstat->GStatus |= 1<<GSB_SpIntRemapHole;
			pDCTstat->Status |= 1<<SB_SWNodeHole;
			pMCTstat->GStatus |= 1<<GSB_SoftHole;
			pMCTstat->HoleBase = base;
			pDCTstat->DCTSysBase = base;
			pDCTstat->DCTSysLimit = limit;
			/* No Remapping. Normal Contiguous mapping */
			pDCTstat->DCTSysBase = base;
			pDCTstat->DCTSysLimit = limit;
			/*No Remapping. Normal Contiguous mapping*/
			pDCTstat->DCTSysBase = base;
			pDCTstat->DCTSysLimit = limit;
		base |= 3;	/* set WE,RE fields*/
		pMCTstat->SysLimit = limit;
		Set_NB32(dev, 0x40 + (Node << 3), base);	/* [Node] + Dram Base 0 */
		/* if Node limit > 1GB then set it to 1GB boundary for each node */
		if ((mctSetNodeBoundary_D()) && (limit > 0x00400000)) {
		val = limit & 0xFFFF0000;
		Set_NB32(dev, 0x44 + (Node << 3), val);	/* set DstNode */
		limit = pDCTstat->DCTSysLimit;
		NextBase = (limit & 0xFFFF0000) + 0x10000;	/* next node starts one 64K-unit above */
		if ((mctSetNodeBoundary_D()) && (NextBase > 0x00400000)) {
			NextBase &= 0xFFC00000;	/* clamp to 1GB boundary */
	/* Copy dram map from Node 0 to Node 1-7 */
	for (Node = 1; Node < MAX_NODES_SUPPORTED; Node++) {
		pDCTstat = pDCTstatA + Node;
		devx = pDCTstat->dev_map;
		if (pDCTstat->NodePresent) {
			printk(BIOS_DEBUG, " Copy dram map from Node 0 to Node %02x \n", Node);
			reg = 0x40;	/*Dram Base 0*/
			val = Get_NB32(dev, reg);
			Set_NB32(devx, reg, val);
			} while ( reg < 0x80);
			break;	/* stop at first absent Node */
	/*Copy dram map to F1x120/124*/
	mct_HTMemMapExt(pMCTstat, pDCTstatA);
static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstatA)
	/* Initiates a memory clear operation for all node. The mem clr
	 * is done in parallel. After the memclr is complete, all processors
	 * status are checked to ensure that memclr has completed.
	 */
	struct DCTStatStruc *pDCTstat;
	if (!mctGet_NVbits(NV_DQSTrainCTL)){
		// FIXME: callback to wrapper: mctDoWarmResetMemClr_D
	} else {	// NV_DQSTrainCTL == 1
		/* Kick off the clear on every present node first... */
		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
			pDCTstat = pDCTstatA + Node;
			if (pDCTstat->NodePresent) {
				DCTMemClr_Init_D(pMCTstat, pDCTstat);
		/* ...then wait for each node to finish (parallel clear). */
		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
			pDCTstat = pDCTstatA + Node;
			if (pDCTstat->NodePresent) {
				DCTMemClr_Sync_D(pMCTstat, pDCTstat);
static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstat)
	/* Initiates a memory clear operation on one node */
	if (pDCTstat->DCTSysLimit) {	/* only if this node has mapped memory */
		dev = pDCTstat->dev_dct;
		/* Wait for any in-progress clear before starting a new one. */
		val = Get_NB32(dev, reg);
		} while (val & (1 << MemClrBusy));
		val |= (1 << MemClrInit);	/* start the HW memory clear */
		Set_NB32(dev, reg, val);
static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
	/* Ensures that memory clear has completed on all node.*/
	struct DCTStatStruc *pDCTstat;
	if (!mctGet_NVbits(NV_DQSTrainCTL)){
		// callback to wrapper: mctDoWarmResetMemClr_D
	} else {	// NV_DQSTrainCTL == 1
		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
			pDCTstat = pDCTstatA + Node;
			if (pDCTstat->NodePresent) {
				DCTMemClr_Sync_D(pMCTstat, pDCTstat);
static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstat)
	u32 dev = pDCTstat->dev_dct;
	/* Ensure that a memory clear operation has completed on one node */
	if (pDCTstat->DCTSysLimit){
		/* Poll until the controller drops MemClrBusy... */
		val = Get_NB32(dev, reg);
		} while (val & (1 << MemClrBusy));
		/* ...and then reports the clear complete. */
		val = Get_NB32(dev, reg);
		} while (!(val & (1 << Dr_MemClrStatus)));
	/* Program F2x11C with BKDG-recommended value plus S3 flush bit. */
	val = 0x0FE40FC0;		// BKDG recommended
	val |= MCCH_FlushWrOnStpGnt;	// Set for S3
	Set_NB32(dev, 0x11C, val);
static u8 NodePresent_D(u8 Node)
	/*
	 * Determine if a single Hammer Node exists within the network.
	 */
	dev = PA_HOST(Node);		/*test device/vendor id at host bridge */
	val = Get_NB32(dev, 0);
	dword = mct_NodePresent_D();	/* FIXME: BOZO -11001022h rev for F */
	if (val == dword) {		/* AMD Hammer Family CPU HT Configuration */
		if (oemNodePresent_D(Node, &ret))
		/* Node ID register */
		val = Get_NB32(dev, 0x60);
		if (val == dword)	/* current nodeID = requested nodeID ? */
static void DCTInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct)
	/*
	 * Initialize DRAM on single Athlon 64/Opteron Node.
	 * Pipeline of stages, each gated on the previous one staying below
	 * SC_StopError: DIMM presence -> SPD width -> cycle timing ->
	 * auto config -> platform spec -> controller startup.
	 */
	ClearDCT_D(pMCTstat, pDCTstat, dct);
	stopDCTflag = 1;		/*preload flag with 'disable' */
	if (mct_DIMMPresence(pMCTstat, pDCTstat, dct) < SC_StopError) {
		print_t("\t\tDCTInit_D: mct_DIMMPresence Done\n");
		if (mct_SPDCalcWidth(pMCTstat, pDCTstat, dct) < SC_StopError) {
			print_t("\t\tDCTInit_D: mct_SPDCalcWidth Done\n");
			if (AutoCycTiming_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
				print_t("\t\tDCTInit_D: AutoCycTiming_D Done\n");
				if (AutoConfig_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
					print_t("\t\tDCTInit_D: AutoConfig_D Done\n");
					if (PlatformSpec_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
						print_t("\t\tDCTInit_D: PlatformSpec_D Done\n");
						if (!(pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW))) {
							print_t("\t\tDCTInit_D: StartupDCT_D\n");
							StartupDCT_D(pMCTstat, pDCTstat, dct);	/*yeaahhh! */
	/* Failure path: disable this DRAM interface to save power. */
	u32 reg_off = dct * 0x100;
	val = 1<<DisDramInterface;
	Set_NB32(pDCTstat->dev_dct, reg_off+0x94, val);
	/*To maximize power savings when DisDramInterface=1b,
	  all of the MemClkDis bits should also be set.*/
	Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
	mct_EnDllShutdownSR(pMCTstat, pDCTstat, dct);
static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
	/* Wait (and block further access to dram) for all DCTs to be ready,
	 * by polling all InitDram bits and waiting for possible memory clear
	 * operations to be complete.  Read MemClkFreqVal bit to see if
	 * the DIMMs are present in this node.
	 */
	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
		struct DCTStatStruc *pDCTstat;
		pDCTstat = pDCTstatA + Node;
		mct_SyncDCTsReady(pDCTstat);	/* per-node wait, wrapper-provided */
static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
	/* Read MemClkFreqVal bit to see if the DIMMs are present in this node.
	 * If the DIMMs are present then set the DRAM Enable bit for this node.
	 *
	 * Setting dram init starts up the DCT state machine, initializes the
	 * dram devices with MRS commands, and kicks off any
	 * HW memory clear process that the chip is capable of. The sooner
	 * that dram init is set for all nodes, the faster the memory system
	 * initialization can complete. Thus, the init loop is unrolled into
	 * two loops so as to start the processes for non BSP nodes sooner.
	 * This procedure will not wait for the process to finish.
	 * Synchronization is handled elsewhere.
	 */
	u32 reg_off = dct * 0x100;	/* DCT1 registers live 0x100 above DCT0 */
	dev = pDCTstat->dev_dct;
	val = Get_NB32(dev, 0x94 + reg_off);
	if (val & (1<<MemClkFreqVal)) {		/* DIMMs present on this DCT */
		print_t("\t\t\tStartupDCT_D: MemClkFreqVal\n");
		byte = mctGet_NVbits(NV_DQSTrainCTL);
		/* Enable DQSRcvEn training mode */
		print_t("\t\t\tStartupDCT_D: DqsRcvEnTrain set \n");
		reg = 0x78 + reg_off;
		val = Get_NB32(dev, reg);
		/* Setting this bit forces a 1T window with hard left
		 * pass/fail edge and a probabilistic right pass/fail
		 * edge. LEFT edge is referenced for final
		 * receiver enable position.*/
		val |= 1 << DqsRcvEnTrain;
		Set_NB32(dev, reg, val);
		mctHookBeforeDramInit();	/* generalized Hook */
		print_t("\t\t\tStartupDCT_D: DramInit \n");
		mct_DramInit(pMCTstat, pDCTstat, dct);
		AfterDramInit_D(pDCTstat, dct);
		mctHookAfterDramInit();		/* generalized Hook*/
/* Zero this DCT's register block (F2x40 upward).  When the DIMM-spare
 * no-warm-reset flag is set, stop earlier (0x78) to preserve the already
 * trained timing registers; otherwise clear through 0xA4. */
static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
	u32 dev = pDCTstat->dev_dct;
	u32 reg = 0x40 + 0x100 * dct;
	if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
		reg_end = 0x78 + 0x100 * dct;
		reg_end = 0xA4 + 0x100 * dct;
	while(reg < reg_end) {
		Set_NB32(dev, reg, val);
	dev = pDCTstat->dev_map;
	Set_NB32(dev, reg, val);
927 static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
928 struct DCTStatStruc *pDCTstat, u8 dct)
930 /* Initialize DCT Timing registers as per DIMM SPD.
931 * For primary timing (T, CL) use best case T value.
932 * For secondary timing params., use most aggressive settings
935 * There are three components to determining "maximum frequency":
936 * SPD component, Bus load component, and "Preset" max frequency
939 * The SPD component is a function of the min cycle time specified
940 * by each DIMM, and the interaction of cycle times from all DIMMs
941 * in conjunction with CAS latency. The SPD component only applies
942 * when user timing mode is 'Auto'.
944 * The Bus load component is a limiting factor determined by electrical
945 * characteristics on the bus as a result of varying number of device
946 * loads. The Bus load component is specific to each platform but may
947 * also be a function of other factors. The bus load component only
948 * applies when user timing mode is 'Auto'.
950 * The Preset component is subdivided into three items and is the
951 * minimum of the set: Silicon revision, user limit setting when user
952 * timing mode is 'Auto' and memclock mode is 'Limit', OEM build
953 * specification of the maximum frequency. The Preset component is only
954 * applies when user timing mode is 'Auto'.
959 u8 Trp, Trrd, Trcd, Tras, Trc, Trfc[4], Rows;
960 u32 DramTimingLo, DramTimingHi;
973 /* Get primary timing (CAS Latency and Cycle Time) */
974 if (pDCTstat->Speed == 0) {
975 mctGet_MaxLoadFreq(pDCTstat);
977 /* and Factor in presets (setup options, Si cap, etc.) */
978 GetPresetmaxF_D(pMCTstat, pDCTstat);
980 /* Go get best T and CL as specified by DIMM mfgs. and OEM */
981 SPDGetTCL_D(pMCTstat, pDCTstat, dct);
982 /* skip callback mctForce800to1067_D */
983 pDCTstat->Speed = pDCTstat->DIMMAutoSpeed;
984 pDCTstat->CASL = pDCTstat->DIMMCASL;
986 /* if "manual" memclock mode */
987 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
988 pDCTstat->Speed = mctGet_NVbits(NV_MemCkVal) + 1;
991 mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
993 /* Gather all DIMM mini-max values for cycle timing data */
1003 for (i=0; i < 4; i++)
1006 for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
1008 if (pDCTstat->DIMMValid & (1 << i)) {
1009 smbaddr = Get_DIMMAddress_D(pDCTstat, dct + i);
1010 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1012 Rows = byte; /* keep track of largest row sz */
1014 byte = mctRead_SPD(smbaddr, SPD_TRP);
1018 byte = mctRead_SPD(smbaddr, SPD_TRRD);
1022 byte = mctRead_SPD(smbaddr, SPD_TRCD);
1026 byte = mctRead_SPD(smbaddr, SPD_TRTP);
1030 byte = mctRead_SPD(smbaddr, SPD_TWR);
1034 byte = mctRead_SPD(smbaddr, SPD_TWTR);
1038 val = mctRead_SPD(smbaddr, SPD_TRC);
1039 if ((val == 0) || (val == 0xFF)) {
1040 pDCTstat->ErrStatus |= 1<<SB_NoTrcTrfc;
1041 pDCTstat->ErrCode = SC_VarianceErr;
1042 val = Get_DefTrc_k_D(pDCTstat->Speed);
1044 byte = mctRead_SPD(smbaddr, SPD_TRCRFC);
1046 val++; /* round up in case fractional extension is non-zero.*/
1052 /* dev density=rank size/#devs per rank */
1053 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1055 val = ((byte >> 5) | (byte << 3)) & 0xFF;
1058 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE; /* dev density=2^(rows+columns+banks) */
1061 } else if (byte == 8) {
1063 } else if (byte == 16) {
1069 if (Trfc[LDIMM] < byte)
1072 byte = mctRead_SPD(smbaddr, SPD_TRAS);
1075 } /* Dimm Present */
1078 /* Convert DRAM CycleTiming values and store into DCT structure */
1080 byte = pDCTstat->Speed;
1083 Tk40 = Get_40Tk_D(byte);
1087 1. All secondary time values given in SPDs are in binary with units of ns.
1088 2. Some time values are scaled by four, in order to have least count of 0.25 ns
1089 (more accuracy). JEDEC SPD spec. shows which ones are x1 and x4.
1090 3. Internally to this SW, cycle time, Tk, is scaled by 10 to affect a
1091 least count of 0.1 ns (more accuracy).
1092 4. SPD values not scaled are multiplied by 10 and then divided by 10T to find
1093 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).
1094 5. SPD values that are prescaled by 4 are multiplied by 10 and then divided by 40T to find
1095 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).*/
1099 pDCTstat->DIMMTras = (u16)dword;
1101 if (dword % Tk40) { /* round up number of busclocks */
1105 if (val < Min_TrasT_1066)
1106 val = Min_TrasT_1066;
1107 else if (val > Max_TrasT_1066)
1108 val = Max_TrasT_1066;
1110 if (val < Min_TrasT)
1112 else if (val > Max_TrasT)
1115 pDCTstat->Tras = val;
1119 pDCTstat->DIMMTrp = dword;
1121 if (dword % Tk40) { /* round up number of busclocks */
1125 if (val < Min_TrasT_1066)
1126 val = Min_TrpT_1066;
1127 else if (val > Max_TrpT_1066)
1128 val = Max_TrpT_1066;
1132 else if (val > Max_TrpT)
1135 pDCTstat->Trp = val;
1139 pDCTstat->DIMMTrrd = dword;
1141 if (dword % Tk40) { /* round up number of busclocks */
1145 if (val < Min_TrrdT_1066)
1146 val = Min_TrrdT_1066;
1147 else if (val > Max_TrrdT_1066)
1148 val = Max_TrrdT_1066;
1150 if (val < Min_TrrdT)
1152 else if (val > Max_TrrdT)
1155 pDCTstat->Trrd = val;
1159 pDCTstat->DIMMTrcd = dword;
1161 if (dword % Tk40) { /* round up number of busclocks */
1165 if (val < Min_TrcdT_1066)
1166 val = Min_TrcdT_1066;
1167 else if (val > Max_TrcdT_1066)
1168 val = Max_TrcdT_1066;
1170 if (val < Min_TrcdT)
1172 else if (val > Max_TrcdT)
1175 pDCTstat->Trcd = val;
1179 pDCTstat->DIMMTrc = dword;
1181 if (dword % Tk40) { /* round up number of busclocks */
1185 if (val < Min_TrcT_1066)
1186 val = Min_TrcT_1066;
1187 else if (val > Max_TrcT_1066)
1188 val = Max_TrcT_1066;
1192 else if (val > Max_TrcT)
1195 pDCTstat->Trc = val;
1199 pDCTstat->DIMMTrtp = dword;
1200 val = pDCTstat->Speed;
1201 if (val <= 2) { /* 7.75ns / Speed in ns to get clock # */
1202 val = 2; /* for DDR400/DDR533 */
1203 } else { /* Note a speed of 3 will be a Trtp of 3 */
1204 val = 3; /* for DDR667/DDR800/DDR1066 */
1206 pDCTstat->Trtp = val;
1210 pDCTstat->DIMMTwr = dword;
1212 if (dword % Tk40) { /* round up number of busclocks */
1216 if (val < Min_TwrT_1066)
1217 val = Min_TwrT_1066;
1218 else if (val > Max_TwrT_1066)
1219 val = Max_TwrT_1066;
1223 else if (val > Max_TwrT)
1226 pDCTstat->Twr = val;
1230 pDCTstat->DIMMTwtr = dword;
1232 if (dword % Tk40) { /* round up number of busclocks */
1236 if (val < Min_TwrT_1066)
1237 val = Min_TwtrT_1066;
1238 else if (val > Max_TwtrT_1066)
1239 val = Max_TwtrT_1066;
1241 if (val < Min_TwtrT)
1243 else if (val > Max_TwtrT)
1246 pDCTstat->Twtr = val;
1251 pDCTstat->Trfc[i] = Trfc[i];
1253 mctAdjustAutoCycTmg_D();
1255 /* Program DRAM Timing values */
1256 DramTimingLo = 0; /* Dram Timing Low init */
1257 val = pDCTstat->CASL;
1258 val = Tab_tCL_j[val];
1259 DramTimingLo |= val;
1261 val = pDCTstat->Trcd;
1263 val -= Bias_TrcdT_1066;
1267 DramTimingLo |= val<<4;
1269 val = pDCTstat->Trp;
1271 val -= Bias_TrpT_1066;
1276 DramTimingLo |= val<<7;
1278 val = pDCTstat->Trtp;
1280 DramTimingLo |= val<<11;
1282 val = pDCTstat->Tras;
1284 val -= Bias_TrasT_1066;
1287 DramTimingLo |= val<<12;
1289 val = pDCTstat->Trc;
1291 DramTimingLo |= val<<16;
1294 val = pDCTstat->Twr;
1296 DramTimingLo |= val<<20;
1299 val = pDCTstat->Trrd;
1301 val -= Bias_TrrdT_1066;
1304 DramTimingLo |= val<<22;
1307 DramTimingHi = 0; /* Dram Timing Low init */
1308 val = pDCTstat->Twtr;
1310 val -= Bias_TwtrT_1066;
1313 DramTimingHi |= val<<8;
1316 DramTimingHi |= val<<16;
1323 DramTimingHi |= val << 20;
1326 dev = pDCTstat->dev_dct;
1327 reg_off = 0x100 * dct;
1328 print_tx("AutoCycTiming: DramTimingLo ", DramTimingLo);
1329 print_tx("AutoCycTiming: DramTimingHi ", DramTimingHi);
1331 Set_NB32(dev, 0x88 + reg_off, DramTimingLo); /*DCT Timing Low*/
1332 DramTimingHi |=0x0000FC77;
1333 Set_NB32(dev, 0x8c + reg_off, DramTimingHi); /*DCT Timing Hi*/
1337 dword = pDCTstat->Twr;
1338 dword -= Bias_TwrT_1066;
1340 reg = 0x84 + reg_off;
1341 val = Get_NB32(dev, reg);
1344 Set_NB32(dev, reg, val);
1346 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1348 print_tx("AutoCycTiming: Status ", pDCTstat->Status);
1349 print_tx("AutoCycTiming: ErrStatus ", pDCTstat->ErrStatus);
1350 print_tx("AutoCycTiming: ErrCode ", pDCTstat->ErrCode);
1351 print_t("AutoCycTiming: Done\n");
1353 mctHookAfterAutoCycTmg();
1355 return pDCTstat->ErrCode;
/*
 * Clamp pDCTstat->PresetmaxFreq to the least of three limits:
 * the Si-revision cap (533 for Rev F0), an optional user "limit"-mode
 * override from NVRAM, and the OEM platform NV_MAX_MEMCLK setting.
 * NOTE(review): some interior lines are elided in this listing;
 * comments describe only the visible logic.
 */
1359 static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
1360 struct DCTStatStruc *pDCTstat)
1362 /* Get max frequency from OEM platform definition, from any user
1363 * override (limiting) of max frequency, and from any Si Revision
1364 * Specific information. Return the least of these three in
1365 * DCTStatStruc.PresetmaxFreq.
1371 /* Get CPU Si Revision defined limit (NPT) */
1372 proposedFreq = 533; /* Rev F0 programmable max memclock is */
1374 /*Get User defined limit if "limit" mode */
1375 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
1376 word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
1377 if (word < proposedFreq)
1378 proposedFreq = word;
1380 /* Get Platform defined limit */
1381 word = mctGet_NVbits(NV_MAX_MEMCLK);
1382 if (word < proposedFreq)
1383 proposedFreq = word;
/* Final result is min(current PresetmaxFreq, proposedFreq). */
1385 word = pDCTstat->PresetmaxFreq;
1386 if (word > proposedFreq)
1387 word = proposedFreq;
1389 pDCTstat->PresetmaxFreq = word;
/*
 * Search for the best (cycle time, CAS latency) pair supported by every
 * populated DIMM on this DCT. Outer loop walks k (frequency index) from
 * fastest down; inner loop walks j (CAS latency index) up. The first
 * (j,k) that the system AND all valid DIMMs support wins.
 * NOTE(review): interior lines are elided in this listing; the capture
 * of T1min/CL1min on a successful match is not visible here.
 */
1395 static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
1396 struct DCTStatStruc *pDCTstat, u8 dct)
1398 /* Find the best T and CL primary timing parameter pair, per Mfg.,
1399 * for the given set of DIMMs, and store into DCTStatStruc
1400 * (.DIMMAutoSpeed and .DIMMCASL). See "Global relationship between
1401 * index values and item values" for definition of CAS latency
1402 * index (j) and Frequency index (k).
1407 /* i={0..7} (std. physical DIMM number)
1408 * j is an integer which enumerates increasing CAS latency.
1409 * k is an integer which enumerates decreasing cycle time.
1410 * CL no. {0,1,2} corresponds to CL X, CL X-.5, or CL X-1 (per individual DIMM)
1411 * Max timing values are per parameter, of all DIMMs, spec'd in ns like the SPD.
1416 for (k=K_MAX; k >= K_MIN; k--) {
1417 for (j = J_MIN; j <= J_MAX; j++) {
1418 if (Sys_Capability_D(pMCTstat, pDCTstat, j, k) ) {
1419 /* 1. check to see if DIMMi is populated.
1420 2. check if DIMMi supports CLj and Tjk */
1421 for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
1422 if (pDCTstat->DIMMValid & (1 << i)) {
1423 if (Dimm_Supports_D(pDCTstat, i, j, k))
/* i == MAX_DIMMS_SUPPORTED means no DIMM rejected this (j,k). */
1427 if (i == MAX_DIMMS_SUPPORTED) {
/* T1min != 0xFF: a workable pair was found; commit it. */
1437 if (T1min != 0xFF) {
1438 pDCTstat->DIMMCASL = CL1min; /*mfg. optimized */
1439 pDCTstat->DIMMAutoSpeed = T1min;
1440 print_tx("SPDGetTCL_D: DIMMCASL ", pDCTstat->DIMMCASL);
1441 print_tx("SPDGetTCL_D: DIMMAutoSpeed ", pDCTstat->DIMMAutoSpeed);
/* Otherwise fall back to failsafe minimum mode and record the variance error. */
1444 pDCTstat->DIMMCASL = CL_DEF; /* failsafe values (running in min. mode) */
1445 pDCTstat->DIMMAutoSpeed = T_DEF;
1446 pDCTstat->ErrStatus |= 1 << SB_DimmMismatchT;
1447 pDCTstat->ErrStatus |= 1 << SB_MinimumMode;
1448 pDCTstat->ErrCode = SC_VarianceErr;
1450 print_tx("SPDGetTCL_D: Status ", pDCTstat->Status);
1451 print_tx("SPDGetTCL_D: ErrStatus ", pDCTstat->ErrStatus);
1452 print_tx("SPDGetTCL_D: ErrCode ", pDCTstat->ErrCode);
1453 print_t("SPDGetTCL_D: Done\n");
/*
 * Apply platform-specific DRAM configuration for one DCT: fetch the
 * platform-specific config (both DCTs when ganged), enable 2T command
 * mode in Dram Configuration Hi (F2x[1,0]94 bit 20) when required,
 * then run the platform hook and DRAM PHY compensation init.
 * Returns pDCTstat->ErrCode.
 * NOTE(review): some interior lines are elided in this listing.
 */
1457 static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
1458 struct DCTStatStruc *pDCTstat, u8 dct)
1464 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, dct);
1466 if (pDCTstat->GangedMode) {
1467 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1);
1470 if ( pDCTstat->_2Tmode == 2) {
1471 dev = pDCTstat->dev_dct;
1472 reg = 0x94 + 0x100 * dct; /* Dram Configuration Hi */
1473 val = Get_NB32(dev, reg);
1474 val |= 1 << 20; /* 2T CMD mode */
1475 Set_NB32(dev, reg, val);
1478 mct_PlatformSpec(pMCTstat, pDCTstat, dct);
1479 InitPhyCompensation(pMCTstat, pDCTstat, dct);
1480 mctHookAfterPSCfg();
1481 return pDCTstat->ErrCode;
/*
 * Build and program the DCT control/config registers for one channel:
 * Dram Control (F2x78), Dram Timing Lo (F2x88), Dram Config Misc/Misc2
 * (F2xA0/A8), Dram Config Lo/Hi (F2x90/94). Also sets bank addressing
 * (via SPDSetBanks_D) and chip-select mapping (via StitchMemory_D)
 * before assembling register values. Returns pDCTstat->ErrCode.
 * NOTE(review): many interior lines (declarations, else-branches,
 * closing braces) are elided in this listing; comments describe only
 * the visible logic.
 */
1485 static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
1486 struct DCTStatStruc *pDCTstat, u8 dct)
1488 u32 DramControl, DramTimingLo, Status;
1489 u32 DramConfigLo, DramConfigHi, DramConfigMisc, DramConfigMisc2;
1497 print_tx("AutoConfig_D: DCT: ", dct);
1502 DramConfigMisc2 = 0;
1504 /* set bank addressing and Masks, plus CS pops */
1505 SPDSetBanks_D(pMCTstat, pDCTstat, dct);
1506 if (pDCTstat->ErrCode == SC_StopError)
1507 goto AutoConfig_exit;
1509 /* map chip-selects into local address space */
1510 StitchMemory_D(pMCTstat, pDCTstat, dct);
1511 InterleaveBanks_D(pMCTstat, pDCTstat, dct);
1513 /* temp image of status (for convenience). RO usage! */
1514 Status = pDCTstat->Status;
1516 dev = pDCTstat->dev_dct;
1517 reg_off = 0x100 * dct;
1520 /* Build Dram Control Register Value */
1521 DramConfigMisc2 = Get_NB32 (dev, 0xA8 + reg_off); /* Dram Control*/
1522 DramControl = Get_NB32 (dev, 0x78 + reg_off); /* Dram Control*/
1524 if (mctGet_NVbits(NV_CLKHZAltVidC3))
1525 DramControl |= 1<<16;
1527 // FIXME: Add support(skip) for Ax and Cx versions
1528 DramControl |= 5; /* RdPtrInit */
1531 /* Build Dram Config Lo Register Value */
1532 DramConfigLo |= 1 << 4; /* 75 Ohms ODT */
/* ODT strength selection depends on slot count, speed, and DIMM loading. */
1533 if (mctGet_NVbits(NV_MAX_DIMMS) == 8) {
1534 if (pDCTstat->Speed == 3) {
1535 if ((pDCTstat->MAdimms[dct] == 4))
1536 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1537 } else if (pDCTstat->Speed == 4){
1538 if ((pDCTstat->MAdimms[dct] != 1))
1539 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1542 // FIXME: Skip for Ax versions
1543 if ((pDCTstat->MAdimms[dct] == 4)) {
1544 if ( pDCTstat->DimmQRPresent != 0) {
1545 if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 4)) {
1546 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1548 } else if ((pDCTstat->MAdimms[dct] == 4)) {
1549 if (pDCTstat->Speed == 4) {
1550 if ( pDCTstat->DimmQRPresent != 0) {
1551 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1555 } else if ((pDCTstat->MAdimms[dct] == 2)) {
1556 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1561 // FIXME: Skip for Ax versions
1562 /* callback not required - if (!mctParityControl_D()) */
1563 if (Status & (1 << SB_PARDIMMs)) {
1564 DramConfigLo |= 1 << ParEn;
1565 DramConfigMisc2 |= 1 << ActiveCmdAtRst;
1567 DramConfigLo &= ~(1 << ParEn);
1568 DramConfigMisc2 &= ~(1 << ActiveCmdAtRst);
1571 if (mctGet_NVbits(NV_BurstLen32)) {
1572 if (!pDCTstat->GangedMode)
1573 DramConfigLo |= 1 << BurstLength32;
1576 if (Status & (1 << SB_128bitmode))
1577 DramConfigLo |= 1 << Width128; /* 128-bit mode (normal) */
1582 if (pDCTstat->Dimmx4Present & (1 << word))
1583 DramConfigLo |= 1 << dword; /* X4Dimm[3:0] */
1589 if (!(Status & (1 << SB_Registered)))
1590 DramConfigLo |= 1 << UnBuffDimm; /* Unbuffered DIMMs */
/* Enable DIMM ECC only if the hardware is capable, every DIMM has ECC,
 * and the user has enabled ECC in NVRAM. */
1592 if (mctGet_NVbits(NV_ECC_CAP))
1593 if (Status & (1 << SB_ECCDIMMs))
1594 if ( mctGet_NVbits(NV_ECC))
1595 DramConfigLo |= 1 << DimmEcEn;
1597 DramConfigLo = mct_DisDllShutdownSR(pMCTstat, pDCTstat, DramConfigLo, dct);
1599 /* Build Dram Config Hi Register Value */
1600 dword = pDCTstat->Speed;
1601 DramConfigHi |= dword - 1; /* get MemClk encoding */
1602 DramConfigHi |= 1 << MemClkFreqVal;
1604 if (Status & (1 << SB_Registered))
1605 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0))
1606 /* set only if x8 Registered DIMMs in System*/
1607 DramConfigHi |= 1 << RDqsEn;
1609 if (mctGet_NVbits(NV_CKE_PDEN)) {
1610 DramConfigHi |= 1 << 15; /* PowerDownEn */
1611 if (mctGet_NVbits(NV_CKE_CTL))
1612 /*Chip Select control of CKE*/
1613 DramConfigHi |= 1 << 16;
1616 /* Control Bank Swizzle */
1617 if (0) /* call back not needed mctBankSwizzleControl_D()) */
1618 DramConfigHi &= ~(1 << BankSwizzleMode);
1620 DramConfigHi |= 1 << BankSwizzleMode; /* recommended setting (default) */
1622 /* Check for Quadrank DIMM presence */
1623 if ( pDCTstat->DimmQRPresent != 0) {
1624 byte = mctGet_NVbits(NV_4RANKType);
1626 DramConfigHi |= 1 << 17; /* S4 (4-Rank SO-DIMMs) */
1628 DramConfigHi |= 1 << 18; /* R4 (4-Rank Registered DIMMs) */
1631 if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
1632 val = mctGet_NVbits(NV_BYPMAX);
1634 val = 0x0f; // recommended setting (default)
1635 DramConfigHi |= val << 24;
/* Tfaw selection: table indexed by Speed, 1K- vs 2K-page variants. */
1637 val = pDCTstat->DIMM2Kpage;
1638 if (pDCTstat->GangedMode != 0) {
1646 val = Tab_2KTfawT_k[pDCTstat->Speed];
1648 val = Tab_1KTfawT_k[pDCTstat->Speed];
1650 if (pDCTstat->Speed == 5)
1655 DramConfigHi |= val; /* Tfaw for 1K or 2K paged drams */
1657 // FIXME: Skip for Ax versions
1658 DramConfigHi |= 1 << DcqArbBypassEn;
1661 /* Build MemClkDis Value from Dram Timing Lo and
1662 Dram Config Misc Registers
1663 1. We will assume that MemClkDis field has been preset prior to this
1665 2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
1666 NV_AllMemClks <>0 AND SB_DiagClks ==0 */
1669 /* Dram Timing Low (owns Clock Enable bits) */
1670 DramTimingLo = Get_NB32(dev, 0x88 + reg_off);
1671 if (mctGet_NVbits(NV_AllMemClks) == 0) {
1672 /* Special Jedec SPD diagnostic bit - "enable all clocks" */
1673 if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
1675 byte = mctGet_NVbits(NV_PACK_TYPE);
1678 else if (byte == PT_M2)
1684 while(dword < MAX_DIMMS_SUPPORTED) {
1686 print_tx("DramTimingLo: val=", val);
1687 if (!(pDCTstat->DIMMValid & (1<<val)))
/* Disable the MemClk for this absent DIMM (MemClkDis bits 31:24). */
1689 DramTimingLo |= 1<<(dword+24);
1695 print_tx("AutoConfig_D: DramControl: ", DramControl);
1696 print_tx("AutoConfig_D: DramTimingLo: ", DramTimingLo);
1697 print_tx("AutoConfig_D: DramConfigMisc: ", DramConfigMisc);
1698 print_tx("AutoConfig_D: DramConfigMisc2: ", DramConfigMisc2);
1699 print_tx("AutoConfig_D: DramConfigLo: ", DramConfigLo);
1700 print_tx("AutoConfig_D: DramConfigHi: ", DramConfigHi);
1702 /* Write Values to the registers */
1703 Set_NB32(dev, 0x78 + reg_off, DramControl);
1704 Set_NB32(dev, 0x88 + reg_off, DramTimingLo);
1705 Set_NB32(dev, 0xA0 + reg_off, DramConfigMisc);
1706 Set_NB32(dev, 0xA8 + reg_off, DramConfigMisc2);
1707 Set_NB32(dev, 0x90 + reg_off, DramConfigLo);
1708 mct_SetDramConfigHi_D(pDCTstat, dct, DramConfigHi);
1709 mct_ForceAutoPrecharge_D(pDCTstat, dct);
1710 mct_EarlyArbEn_D(pMCTstat, pDCTstat);
1711 mctHookAfterAutoCfg();
1713 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1715 print_tx("AutoConfig: Status ", pDCTstat->Status);
1716 print_tx("AutoConfig: ErrStatus ", pDCTstat->ErrStatus);
1717 print_tx("AutoConfig: ErrCode ", pDCTstat->ErrCode);
1718 print_t("AutoConfig: Done\n");
1720 return pDCTstat->ErrCode;
/*
 * Read SPD geometry (rows/cols/banks/width/ranks) for each even chip
 * select, derive the CCCBRR bank-encoding key and CS mask, program the
 * DRAM CS Mask registers (F2x[1,0]60..6C) and the Bank Addressing
 * register (F2x[1,0]80), and record CS population/failure bitmaps.
 * Flags SC_StopError if no chip-select survives the exclude map.
 * NOTE(review): interior lines are elided in this listing; comments
 * describe only the visible logic.
 */
1724 static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
1725 struct DCTStatStruc *pDCTstat, u8 dct)
1727 /* Set bank addressing, program Mask values and build a chip-select
1728 * population map. This routine programs PCI 0:24N:2x80 config register
1729 * and PCI 0:24N:2x60,64,68,6C config registers (CS Mask 0-3).
1732 u8 ChipSel, Rows, Cols, Ranks ,Banks, DevWidth;
1733 u32 BankAddrReg, csMask;
1744 dev = pDCTstat->dev_dct;
1745 reg_off = 0x100 * dct;
/* Even chip selects only: each DIMM owns a CS pair. */
1748 for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel+=2) {
1750 if ((pDCTstat->Status & (1 << SB_64MuxedMode)) && ChipSel >=4)
1753 if (pDCTstat->DIMMValid & (1<<byte)) {
1754 smbaddr = Get_DIMMAddress_D(pDCTstat, (ChipSel + dct));
1756 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1759 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
1762 Banks = mctRead_SPD(smbaddr, SPD_LBANKS);
1764 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH);
1765 DevWidth = byte & 0x7f; /* bits 0-6 = bank 0 width */
1767 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
1768 Ranks = (byte & 7) + 1;
1770 /* Configure Bank encoding
1771 * Use a 6-bit key into a lookup table.
1772 * Key (index) = CCCBRR, where CCC is the number of
1773 * Columns minus 9,RR is the number of Rows minus 13,
1774 * and B is the number of banks minus 2.
1775 * See "6-bit Bank Addressing Table" at the end of
1777 byte = Cols - 9; /* 9 Cols is smallest dev size */
1778 byte <<= 3; /* make room for row and bank bits*/
1782 /* 13 Rows is smallest dev size */
1783 byte |= Rows - 13; /* CCCBRR internal encode */
1785 for (dword=0; dword < 12; dword++) {
1786 if (byte == Tab_BankAddr[dword])
1792 /* bit no. of CS field in address mapping reg.*/
1793 dword <<= (ChipSel<<1);
1794 BankAddrReg |= dword;
1796 /* Mask value=(2pow(rows+cols+banks+3)-1)>>8,
1797 or 2pow(rows+cols+banks-5)-1*/
1800 byte = Rows + Cols; /* cl=rows+cols*/
1802 byte -= 2; /* 3 banks - 5 */
1804 byte -= 3; /* 2 banks - 5 */
1805 /* mask size (64-bit rank only) */
1807 if (pDCTstat->Status & (1 << SB_128bitmode))
1808 byte++; /* double mask size if in 128-bit mode*/
1810 csMask |= 1 << byte;
1813 /*set ChipSelect population indicator even bits*/
1814 pDCTstat->CSPresent |= (1<<ChipSel);
1816 /*set ChipSelect population indicator odd bits*/
1817 pDCTstat->CSPresent |= 1 << (ChipSel + 1);
1819 reg = 0x60+(ChipSel<<1) + reg_off; /*Dram CS Mask Register */
1821 val &= 0x1FF83FE0; /* Mask out reserved bits.*/
1822 Set_NB32(dev, reg, val);
/* Record chip-selects whose SPD read failed so they stay disabled. */
1825 if (pDCTstat->DIMMSPDCSE & (1<<ChipSel))
1826 pDCTstat->CSTestFail |= (1<<ChipSel);
1828 } /* while ChipSel*/
1830 SetCSTriState(pMCTstat, pDCTstat, dct);
1831 /* SetCKETriState */
1832 SetODTTriState(pMCTstat, pDCTstat, dct);
1834 if (pDCTstat->Status & (1 << SB_128bitmode)) {
1835 SetCSTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
1836 SetODTTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
1838 word = pDCTstat->CSPresent;
1839 mctGetCS_ExcludeMap(); /* mask out specified chip-selects */
1840 word ^= pDCTstat->CSPresent;
1841 pDCTstat->CSTestFail |= word; /* enable ODT to disabled DIMMs */
1842 if (!pDCTstat->CSPresent)
1843 pDCTstat->ErrCode = SC_StopError;
1845 reg = 0x80 + reg_off; /* Bank Addressing Register */
1846 Set_NB32(dev, reg, BankAddrReg);
1848 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1850 print_tx("SPDSetBanks: Status ", pDCTstat->Status);
1851 print_tx("SPDSetBanks: ErrStatus ", pDCTstat->ErrStatus);
1852 print_tx("SPDSetBanks: ErrCode ", pDCTstat->ErrCode);
1853 print_t("SPDSetBanks: Done\n");
/*
 * Compare SPD geometry of each channel-A/channel-B DIMM pair (rows,
 * cols, bank size, device width, rank count). Any mismatch sets
 * SB_DimmMismatchO, which later prevents ganged 128-bit operation.
 * NOTE(review): interior lines are elided in this listing.
 */
1857 static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
1858 struct DCTStatStruc *pDCTstat)
1860 /* Per SPDs, check the symmetry of DIMM pairs (DIMM on Channel A
1861 * matching with DIMM on Channel B), the overall DIMM population,
1862 * and determine the width mode: 64-bit, 64-bit muxed, 128-bit.
1866 u8 smbaddr, smbaddr1;
1869 /* Check Symmetry of Channel A and Channel B DIMMs
1870 (must be matched for 128-bit mode).*/
1871 for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
1872 if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
1873 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
1874 smbaddr1 = Get_DIMMAddress_D(pDCTstat, i+1);
/* Row size must match across the pair. */
1876 byte = mctRead_SPD(smbaddr, SPD_ROWSZ) & 0x1f;
1877 byte1 = mctRead_SPD(smbaddr1, SPD_ROWSZ) & 0x1f;
1878 if (byte != byte1) {
1879 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* Column size must match across the pair. */
1883 byte = mctRead_SPD(smbaddr, SPD_COLSZ) & 0x1f;
1884 byte1 = mctRead_SPD(smbaddr1, SPD_COLSZ) & 0x1f;
1885 if (byte != byte1) {
1886 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* Bank size byte must match across the pair. */
1890 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1891 byte1 = mctRead_SPD(smbaddr1, SPD_BANKSZ);
1892 if (byte != byte1) {
1893 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* Device width must match across the pair. */
1897 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0x7f;
1898 byte1 = mctRead_SPD(smbaddr1, SPD_DEVWIDTH) & 0x7f;
1899 if (byte != byte1) {
1900 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/* Rank count must match across the pair. */
1904 byte = mctRead_SPD(smbaddr, SPD_DMBANKS) & 7; /* #ranks-1 */
1905 byte1 = mctRead_SPD(smbaddr1, SPD_DMBANKS) & 7; /* #ranks-1 */
1906 if (byte != byte1) {
1907 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
/*
 * Map enabled chip-selects into the DCT's local address space,
 * largest bank first: repeatedly pick the biggest unmapped bank,
 * program its DRAM CS Base register (F2x[1,0]40..5C) at the next free
 * base, and accumulate the DCT limit. Handles CS sparing and marks
 * failed-but-present chip-selects with the TestFail bit.
 * NOTE(review): interior lines are elided in this listing; comments
 * describe only the visible logic.
 */
1917 static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
1918 struct DCTStatStruc *pDCTstat, u8 dct)
1920 /* Requires that Mask values for each bank be programmed first and that
1921 * the chip-select population indicator is correctly set.
1925 u32 nxtcsBase, curcsBase;
1927 u32 Sizeq, BiggestBank;
1937 dev = pDCTstat->dev_dct;
1938 reg_off = 0x100 * dct;
1942 /* CS Sparing 1=enabled, 0=disabled */
1943 if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
1944 if (MCT_DIMM_SPARE_NO_WARM) {
1945 /* Do no warm-reset DIMM spare */
1946 if (pMCTstat->GStatus & 1 << GSB_EnDIMMSpareNW) {
1947 word = pDCTstat->CSPresent;
1951 /* Make sure at least two chip-selects are available */
1954 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1957 if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
1958 word = pDCTstat->CSPresent;
1960 word &= ~(1 << val);
1962 /* Make sure at least two chip-selects are available */
1965 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1970 nxtcsBase = 0; /* Next available cs base ADDR[39:8] */
/* Greedy pass: each outer iteration maps the biggest remaining bank. */
1971 for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
1973 for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
1974 if (pDCTstat->CSPresent & (1 << q)) { /* bank present? */
1975 reg = 0x40 + (q << 2) + reg_off; /* Base[q] reg.*/
1976 val = Get_NB32(dev, reg);
1977 if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
1978 reg = 0x60 + ((q << 1) & 0xc) + reg_off; /*Mask[q] reg.*/
1979 val = Get_NB32(dev, reg);
1983 Sizeq = val; //never used
1984 if (val > BiggestBank) {
1985 /*Bingo! possibly Map this chip-select next! */
1990 } /*if bank present */
1992 if (BiggestBank !=0) {
1993 curcsBase = nxtcsBase; /* curcsBase=nxtcsBase*/
1994 /* DRAM CS Base b Address Register offset */
1995 reg = 0x40 + (b << 2) + reg_off;
1998 val = 1 << Spare; /* Spare Enable*/
2001 val |= 1 << CSEnable; /* Bank Enable */
2003 Set_NB32(dev, reg, val);
2007 /* let nxtcsBase+=Size[b] */
2008 nxtcsBase += BiggestBank;
2011 /* bank present but disabled?*/
2012 if ( pDCTstat->CSTestFail & (1 << p)) {
2013 /* DRAM CS Base b Address Register offset */
2014 reg = (p << 2) + 0x40 + reg_off;
2015 val = 1 << TestFail;
2016 Set_NB32(dev, reg, val);
2021 pDCTstat->DCTSysLimit = nxtcsBase - 1;
2022 mct_AfterStitchMemory(pMCTstat, pDCTstat, dct);
2025 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
2027 print_tx("StitchMemory: Status ", pDCTstat->Status);
2028 print_tx("StitchMemory: ErrStatus ", pDCTstat->ErrStatus);
2029 print_tx("StitchMemory: ErrCode ", pDCTstat->ErrCode);
2030 print_t("StitchMemory: Done\n");
/* Look up the cycle time for frequency index k. */
2034 static u8 Get_Tk_D(u8 k)
2036 return Table_T_k[k];
/* Look up the CAS-latency encoding for CL index j. */
2040 static u8 Get_CLj_D(u8 j)
2042 return Table_CL2_j[j];
/* Look up the default Trc value for frequency index k (used when a
 * DIMM's SPD Trc byte is 0 or 0xFF). */
2045 static u8 Get_DefTrc_k_D(u8 k)
2047 return Tab_defTrc_k[k];
/* Look up 40 * Tk (cycle time scaled by 40) for frequency index k. */
2051 static u16 Get_40Tk_D(u8 k)
2053 return Tab_40T_k[k]; /* FIXME: k or k<<1 ?*/
/* Look up the memory frequency (MHz) for frequency index k. */
2057 static u16 Get_Fk_D(u8 k)
2059 return Table_F_k[k]; /* FIXME: k or k<<1 ? */
/*
 * Test whether DIMM i supports CAS-latency index j at frequency index
 * k, by decoding the SPD CAS-latency byte and the matching cycle-time
 * SPD byte (9, 23 or 25 depending on how far below CLX the proposed
 * CL is). Returns 0 when the DIMM is capable.
 * NOTE(review): the remaining parameters and several interior lines
 * are elided in this listing.
 */
2063 static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat,
2073 DIMMi = Get_DIMMAddress_D(pDCTstat, i);
2077 /* check if DIMMi supports CLj */
2078 CL_i = mctRead_SPD(DIMMi, SPD_CASLAT);
2081 /*find out if its CL X, CLX-1, or CLX-2 */
2082 word = bsr(byte); /* bit position of CLj */
2083 wordx = bsr(CL_i); /* bit position of CLX of CLi */
2084 wordx -= word; /* CL number (CL no. = 0,1, 2, or 3) */
2085 wordx <<= 3; /* 8 bits per SPD byte index */
2086 /*get T from SPD byte 9, 23, 25*/
2087 word = (EncodedTSPD >> wordx) & 0xFF;
2089 byte = mctRead_SPD(DIMMi, word); /* DIMMi speed */
/* A cycle-time byte of 0 means the SPD is unusable for this check. */
2092 } else if (byte == 0){
2093 pDCTstat->ErrStatus |= 1<<SB_NoCycTime;
2096 ret = 0; /* DIMM is capable! */
/*
 * Probe every DIMM slot over SMBus: verify the SPD checksum, confirm
 * DDR2 type, and build the population bitmaps (present/valid, ECC,
 * parity, x4/x8/x16, 2K-page, planar, dual/quad-rank) plus the per-
 * channel bus-loading counters. Quad-rank DIMMs force a second pass
 * so their upper two ranks are counted as a virtual DIMM.
 * Returns pDCTstat->ErrCode (SC_StopError when no DIMM qualifies).
 * NOTE(review): many interior lines are elided in this listing;
 * comments describe only the visible logic.
 */
2105 static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
2106 struct DCTStatStruc *pDCTstat)
2108 /* Check DIMMs present, verify checksum, flag SDRAM type,
2109 * build population indicator bitmaps, and preload bus loading
2110 * of DIMMs into DCTStatStruc.
2111 * MAAload=number of devices on the "A" bus.
2112 * MABload=number of devices on the "B" bus.
2113 * MAAdimms=number of DIMMs on the "A" bus slots.
2114 * MABdimms=number of DIMMs on the "B" bus slots.
2115 * DATAAload=number of ranks on the "A" bus slots.
2116 * DATABload=number of ranks on the "B" bus slots.
2123 u16 RegDIMMPresent, MaxDimms;
2129 /* preload data structure with addrs */
2130 mctGet_DIMMAddr(pDCTstat, pDCTstat->Node_ID);
2132 DimmSlots = MaxDimms = mctGet_NVbits(NV_MAX_DIMMS);
2134 SPDCtrl = mctGet_NVbits(NV_SPDCHK_RESTRT);
2137 pDCTstat->DimmQRPresent = 0;
2139 for (i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
/* Visit physical slots, plus virtual slots created for QR DIMMs. */
2143 if ((pDCTstat->DimmQRPresent & (1 << i)) || (i < DimmSlots)) {
2144 print_tx("\t DIMMPresence: i=", i);
2145 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
2146 print_tx("\t DIMMPresence: smbaddr=", smbaddr);
/* Accumulate the SPD checksum over bytes 0..62 (checked below). */
2149 for (Index=0; Index < 64; Index++){
2151 status = mctRead_SPD(smbaddr, Index);
2154 byte = status & 0xFF;
2160 pDCTstat->DIMMPresent |= 1 << i;
2161 if ((Checksum & 0xFF) == byte) {
2162 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2163 if (byte == JED_DDR2SDRAM) {
2164 /*Dimm is 'Present'*/
2165 pDCTstat->DIMMValid |= 1 << i;
2168 pDCTstat->DIMMSPDCSE = 1 << i;
2170 pDCTstat->ErrStatus |= 1 << SB_DIMMChkSum;
2171 pDCTstat->ErrCode = SC_StopError;
2173 /*if NV_SPDCHK_RESTRT is set to 1, ignore faulty SPD checksum*/
2174 pDCTstat->ErrStatus |= 1<<SB_DIMMChkSum;
2175 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2176 if (byte == JED_DDR2SDRAM)
2177 pDCTstat->DIMMValid |= 1 << i;
2180 /* Check module type */
2181 byte = mctRead_SPD(smbaddr, SPD_DIMMTYPE);
2182 if (byte & JED_REGADCMSK)
2183 RegDIMMPresent |= 1 << i;
2184 /* Check ECC capable */
2185 byte = mctRead_SPD(smbaddr, SPD_EDCTYPE);
2186 if (byte & JED_ECC) {
2187 /* DIMM is ECC capable */
2188 pDCTstat->DimmECCPresent |= 1 << i;
2190 if (byte & JED_ADRCPAR) {
2191 /* DIMM is address/command parity capable */
2192 pDCTstat->DimmPARPresent |= 1 << i;
2194 /* Check if x4 device */
2195 devwidth = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;
2196 if (devwidth == 4) {
2197 /* DIMM is made with x4 or x16 drams */
2198 pDCTstat->Dimmx4Present |= 1 << i;
2199 } else if (devwidth == 8) {
2200 pDCTstat->Dimmx8Present |= 1 << i;
2201 } else if (devwidth == 16) {
2202 pDCTstat->Dimmx16Present |= 1 << i;
2204 /* check page size */
2205 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
2209 word *= devwidth; /* (((2^COLBITS) / 8) * ORG) / 2048 */
2212 pDCTstat->DIMM2Kpage |= 1 << i;
2214 /*Check if SPD diag bit 'analysis probe installed' is set */
2215 byte = mctRead_SPD(smbaddr, SPD_ATTRIB);
2216 if ( byte & JED_PROBEMSK )
2217 pDCTstat->Status |= 1<<SB_DiagClks;
2219 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
2220 if (!(byte & (1<< SPDPLBit)))
2221 pDCTstat->DimmPlPresent |= 1 << i;
2225 /* if any DIMMs are QR, we have to make two passes through DIMMs*/
2226 if ( pDCTstat->DimmQRPresent == 0) {
2229 if (i < DimmSlots) {
2230 pDCTstat->DimmQRPresent |= (1 << i) | (1 << (i+4));
2232 byte = 2; /* upper two ranks of QR DIMM will be counted on another DIMM number iteration*/
2233 } else if (byte == 2) {
2234 pDCTstat->DimmDRPresent |= 1 << i;
2239 else if (devwidth == 4)
2243 bytex <<= 1; /*double Addr bus load value for dual rank DIMMs*/
2246 pDCTstat->DATAload[j] += byte; /*number of ranks on DATA bus*/
2247 pDCTstat->MAload[j] += bytex; /*number of devices on CMD/ADDR bus*/
2248 pDCTstat->MAdimms[j]++; /*number of DIMMs on A bus */
2249 /*check for DRAM package Year <= 06*/
2250 byte = mctRead_SPD(smbaddr, SPD_MANDATEYR);
2251 if (byte < MYEAR06) {
2252 /*Year < 06 and hence Week < 24 of 06 */
2253 pDCTstat->DimmYr06 |= 1 << i;
2254 pDCTstat->DimmWk2406 |= 1 << i;
2255 } else if (byte == MYEAR06) {
2256 /*Year = 06, check if Week <= 24 */
2257 pDCTstat->DimmYr06 |= 1 << i;
2258 byte = mctRead_SPD(smbaddr, SPD_MANDATEWK);
2259 if (byte <= MWEEK24)
2260 pDCTstat->DimmWk2406 |= 1 << i;
2266 print_tx("\t DIMMPresence: DIMMValid=", pDCTstat->DIMMValid);
2267 print_tx("\t DIMMPresence: DIMMPresent=", pDCTstat->DIMMPresent);
2268 print_tx("\t DIMMPresence: RegDIMMPresent=", RegDIMMPresent);
2269 print_tx("\t DIMMPresence: DimmECCPresent=", pDCTstat->DimmECCPresent);
2270 print_tx("\t DIMMPresence: DimmPARPresent=", pDCTstat->DimmPARPresent);
2271 print_tx("\t DIMMPresence: Dimmx4Present=", pDCTstat->Dimmx4Present);
2272 print_tx("\t DIMMPresence: Dimmx8Present=", pDCTstat->Dimmx8Present);
2273 print_tx("\t DIMMPresence: Dimmx16Present=", pDCTstat->Dimmx16Present);
2274 print_tx("\t DIMMPresence: DimmPlPresent=", pDCTstat->DimmPlPresent);
2275 print_tx("\t DIMMPresence: DimmDRPresent=", pDCTstat->DimmDRPresent);
2276 print_tx("\t DIMMPresence: DimmQRPresent=", pDCTstat->DimmQRPresent);
2277 print_tx("\t DIMMPresence: DATAload[0]=", pDCTstat->DATAload[0]);
2278 print_tx("\t DIMMPresence: MAload[0]=", pDCTstat->MAload[0]);
2279 print_tx("\t DIMMPresence: MAdimms[0]=", pDCTstat->MAdimms[0]);
2280 print_tx("\t DIMMPresence: DATAload[1]=", pDCTstat->DATAload[1]);
2281 print_tx("\t DIMMPresence: MAload[1]=", pDCTstat->MAload[1]);
2282 print_tx("\t DIMMPresence: MAdimms[1]=", pDCTstat->MAdimms[1]);
2284 if (pDCTstat->DIMMValid != 0) { /* If any DIMMs are present...*/
/* Registered and unbuffered DIMMs must not be mixed. */
2285 if (RegDIMMPresent != 0) {
2286 if ((RegDIMMPresent ^ pDCTstat->DIMMValid) !=0) {
2287 /* module type DIMM mismatch (reg'ed, unbuffered) */
2288 pDCTstat->ErrStatus |= 1<<SB_DimmMismatchM;
2289 pDCTstat->ErrCode = SC_StopError;
2291 /* all DIMMs are registered */
2292 pDCTstat->Status |= 1<<SB_Registered;
2295 if (pDCTstat->DimmECCPresent != 0) {
2296 if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
2297 /* all DIMMs are ECC capable */
2298 pDCTstat->Status |= 1<<SB_ECCDIMMs;
2301 if (pDCTstat->DimmPARPresent != 0) {
2302 if ((pDCTstat->DimmPARPresent ^ pDCTstat->DIMMValid) == 0) {
2303 /*all DIMMs are Parity capable */
2304 pDCTstat->Status |= 1<<SB_PARDIMMs;
2308 /* no DIMMs present or no DIMMs that qualified. */
2309 pDCTstat->ErrStatus |= 1<<SB_NoDimms;
2310 pDCTstat->ErrCode = SC_StopError;
2313 print_tx("\t DIMMPresence: Status ", pDCTstat->Status);
2314 print_tx("\t DIMMPresence: ErrStatus ", pDCTstat->ErrStatus);
2315 print_tx("\t DIMMPresence: ErrCode ", pDCTstat->ErrCode);
2316 print_t("\t DIMMPresence: Done\n");
2318 mctHookAfterDIMMpre();
2320 return pDCTstat->ErrCode;
/*
 * Check whether the system (Si revision / platform preset and CL
 * range) can run at CAS-latency index j and frequency index k.
 * NOTE(review): interior lines, including the return statements, are
 * elided in this listing.
 */
2324 static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
2325 struct DCTStatStruc *pDCTstat, int j, int k)
2327 /* Determine if system is capable of operating at given input
2328 * parameters for CL, and T. There are three components to
2329 * determining "maximum frequency" in AUTO mode: SPD component,
2330 * Bus load component, and "Preset" max frequency component.
2331 * This procedure is used to help find the SPD component and relies
2332 * on pre-determination of the bus load component and the Preset
2333 * components. The generalized algorithm for finding maximum
2334 * frequency is structured this way so as to optimize for CAS
2335 * latency (which might get better as a result of reduced frequency).
2336 * See "Global relationship between index values and item values"
2337 * for definition of CAS latency index (j) and Frequency index (k).
/* Reject frequencies above the pre-computed preset maximum. */
2342 if (Get_Fk_D(k) > pDCTstat->PresetmaxFreq)
2347 /* compare proposed CAS latency with AMD Si capabilities */
2348 if ((j < J_MIN) || (j > J_MAX))
/* Return the SMBus address of DIMM i from the pre-loaded DIMMAddr
 * table. NOTE(review): the return statement is elided in this listing. */
2360 static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i)
2364 p = pDCTstat->DIMMAddr;
2365 //mct_BeforeGetDIMMAddress();
/*
 * Initialize DCT0 (ganged or unganged); in unganged mode also
 * initialize DCT1 when it has valid DIMMs, otherwise disable the DCT1
 * DRAM interface. DCT0's error code is preserved when DCT1 merely
 * reports "not running" (ErrCode == 2).
 * NOTE(review): some interior lines are elided in this listing.
 */
2370 static void mct_initDCT(struct MCTStatStruc *pMCTstat,
2371 struct DCTStatStruc *pDCTstat)
2376 /* Config. DCT0 for Ganged or unganged mode */
2377 print_t("\tmct_initDCT: DCTInit_D 0\n");
2378 DCTInit_D(pMCTstat, pDCTstat, 0);
2379 if (pDCTstat->ErrCode == SC_FatalErr) {
2380 // Do nothing goto exitDCTInit; /* any fatal errors? */
2382 /* Configure DCT1 if unganged and enabled*/
2383 if (!pDCTstat->GangedMode) {
2384 if ( pDCTstat->DIMMValidDCT[1] > 0) {
2385 print_t("\tmct_initDCT: DCTInit_D 1\n");
2386 err_code = pDCTstat->ErrCode; /* save DCT0 errors */
2387 pDCTstat->ErrCode = 0;
2388 DCTInit_D(pMCTstat, pDCTstat, 1);
2389 if (pDCTstat->ErrCode == 2) /* DCT1 is not Running */
2390 pDCTstat->ErrCode = err_code; /* Using DCT0 Error code to update pDCTstat.ErrCode */
/* No DIMMs on DCT1: turn its DRAM interface off (F3x... via F2x194). */
2392 val = 1 << DisDramInterface;
2393 Set_NB32(pDCTstat->dev_dct, 0x100 + 0x94, val);
/*
 * Run hardware DRAM initialization for one DCT. For B-stepping parts
 * in ganged mode, auto refresh is disabled across the init and then
 * toggled afterwards to keep both DCTs' refresh in sync (Erratum 278).
 * NOTE(review): some interior lines are elided in this listing.
 */
2401 static void mct_DramInit(struct MCTStatStruc *pMCTstat,
2402 struct DCTStatStruc *pDCTstat, u8 dct)
2406 mct_BeforeDramInit_Prod_D(pMCTstat, pDCTstat);
2407 // FIXME: for rev A: mct_BeforeDramInit_D(pDCTstat, dct);
2409 /* Disable auto refresh before Dram init when in ganged mode (Erratum 278) */
2410 if (pDCTstat->LogicalCPUID & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_BA)) {
2411 if (pDCTstat->GangedMode) {
2412 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2413 val |= 1 << DisAutoRefresh;
2414 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2418 mct_DramInit_Hw_D(pMCTstat, pDCTstat, dct);
2420 /* Re-enable auto refresh after Dram init when in ganged mode
2421 * to ensure both DCTs are in sync (Erratum 278)
2424 if (pDCTstat->LogicalCPUID & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_BA)) {
2425 if (pDCTstat->GangedMode) {
/* Wait for hardware DRAM init to complete (InitDram in F2x[1,0]90). */
2427 val = Get_NB32(pDCTstat->dev_dct, 0x90 + (0x100 * dct));
2428 } while (!(val & (1 << InitDram)));
/* Deliberate clear/set/clear toggle of DisAutoRefresh — keeps both
 * DCTs' refresh engines aligned per the erratum workaround. */
2432 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2433 val &= ~(1 << DisAutoRefresh);
2434 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2435 val |= 1 << DisAutoRefresh;
2436 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2437 val &= ~(1 << DisAutoRefresh);
2438 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
/* Decide ganged (128-bit) vs unganged (64-bit) mode from the per-channel
 * DIMM population and program F2x110[DctGangEn] when ganging is valid.
 * Returns pDCTstat->ErrCode. */
2444 static u8 mct_setMode(struct MCTStatStruc *pMCTstat,
2445 struct DCTStatStruc *pDCTstat)
/* Even DIMMValid bits = channel A sockets, odd bits = channel B. */
2452 byte = bytex = pDCTstat->DIMMValid;
2453 bytex &= 0x55; /* CHA DIMM pop */
2454 pDCTstat->DIMMValidDCT[0] = bytex;
2456 byte &= 0xAA; /* CHB DIMM popa */
2458 pDCTstat->DIMMValidDCT[1] = byte;
2460 if (byte != bytex) {
2461 pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
2463 if ( mctGet_NVbits(NV_Unganged) )
2464 pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
2466 if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
2467 pDCTstat->GangedMode = 1;
2468 /* valid 128-bit mode population. */
2469 pDCTstat->Status |= 1 << SB_128bitmode;
2471 val = Get_NB32(pDCTstat->dev_dct, reg);
2472 val |= 1 << DctGangEn;
2473 Set_NB32(pDCTstat->dev_dct, reg, val);
2474 print_tx("setMode: DRAM Controller Select Low Register = ", val);
2477 return pDCTstat->ErrCode;
/* Read a 32-bit northbridge PCI config register. */
2481 u32 Get_NB32(u32 dev, u32 reg)
2483 return pci_read_config32(dev, reg);
/* Write a 32-bit northbridge PCI config register. */
2487 void Set_NB32(u32 dev, u32 reg, u32 val)
2489 pci_write_config32(dev, reg, val);
/* Indexed NB read: write the index register, then read the data register
 * (index_reg + 4). No completion polling -- see Get_NB32_index_wait. */
2493 u32 Get_NB32_index(u32 dev, u32 index_reg, u32 index)
2497 Set_NB32(dev, index_reg, index);
2498 dword = Get_NB32(dev, index_reg+0x4);
/* Indexed NB write: select the index, then write data at index_reg + 4.
 * No completion polling -- see Set_NB32_index_wait. */
2503 void Set_NB32_index(u32 dev, u32 index_reg, u32 index, u32 data)
2505 Set_NB32(dev, index_reg, index);
2506 Set_NB32(dev, index_reg + 0x4, data);
/* Indexed NB read with handshake: issue a read access (DctAccessWrite
 * cleared), poll DctAccessDone, then fetch the data word. */
2510 u32 Get_NB32_index_wait(u32 dev, u32 index_reg, u32 index)
2516 index &= ~(1 << DctAccessWrite);
2517 Set_NB32(dev, index_reg, index);
2519 dword = Get_NB32(dev, index_reg);
2520 } while (!(dword & (1 << DctAccessDone)));
2521 dword = Get_NB32(dev, index_reg + 0x4);
/* Indexed NB write with handshake: stage the data word, issue the write
 * access (DctAccessWrite set), then poll DctAccessDone. */
2527 void Set_NB32_index_wait(u32 dev, u32 index_reg, u32 index, u32 data)
2532 Set_NB32(dev, index_reg + 0x4, data);
2533 index |= (1 << DctAccessWrite);
2534 Set_NB32(dev, index_reg, index);
2536 dword = Get_NB32(dev, index_reg);
2537 } while (!(dword & (1 << DctAccessDone)));
/* Program platform-specific drive strength / address timing values
 * (from the interface layer) into the DCT(s). In ganged mode both
 * channels are synchronized first. Returns pDCTstat->ErrCode. */
2542 static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
2543 struct DCTStatStruc *pDCTstat, u8 dct)
2545 /* Get platform specific config/timing values from the interface layer
2546 * and program them into DCT.
2549 u32 dev = pDCTstat->dev_dct;
2551 u8 i, i_start, i_end;
2553 if (pDCTstat->GangedMode) {
2554 SyncSetting(pDCTstat);
2561 for (i=i_start; i<i_end; i++) {
2562 index_reg = 0x98 + (i * 0x100);
2563 Set_NB32_index_wait(dev, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]); /* Channel A Output Driver Compensation Control */
2564 Set_NB32_index_wait(dev, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]); /* Channel A Address Timing Control (copy-paste comment fixed) */
2567 return pDCTstat->ErrCode;
/* Block until this node's memory is usable: if any DIMM is populated,
 * spin on F2x110[DramEnabled]. */
2572 static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat)
2577 if (pDCTstat->NodePresent) {
2578 print_tx("mct_SyncDCTsReady: Node ", pDCTstat->Node_ID);
2579 dev = pDCTstat->dev_dct;
2581 if ((pDCTstat->DIMMValidDCT[0] ) || (pDCTstat->DIMMValidDCT[1])) { /* This Node has dram */
2583 val = Get_NB32(dev, 0x110);
2584 } while (!(val & (1 << DramEnabled)));
2585 print_t("mct_SyncDCTsReady: DramEnabled\n");
2587 } /* Node is present */
/* After CAS-latency/timing discovery: load the per-DCT DIMM population
 * into DIMMValid and flag SC_StopError for an empty DCT. The else arm
 * (unganged path is the if) additionally resets CSPresent/CSTestFail. */
2591 static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
2592 struct DCTStatStruc *pDCTstat, u8 dct)
2594 if (!pDCTstat->GangedMode) {
2596 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2597 if (pDCTstat->DIMMValidDCT[dct] == 0)
2598 pDCTstat->ErrCode = SC_StopError;
2600 pDCTstat->CSPresent = 0;
2601 pDCTstat->CSTestFail = 0;
2602 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2603 if (pDCTstat->DIMMValidDCT[dct] == 0)
2604 pDCTstat->ErrCode = SC_StopError;
/* Compute data-bus width from SPD, then settle ganged/unganged mode.
 * Returns the resulting error code. */
2609 static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
2610 struct DCTStatStruc *pDCTstat, u8 dct)
2615 SPDCalcWidth_D(pMCTstat, pDCTstat);
2616 ret = mct_setMode(pMCTstat, pDCTstat);
2618 ret = pDCTstat->ErrCode;
2621 print_tx("SPDCalcWidth: Status ", pDCTstat->Status);
2622 print_tx("SPDCalcWidth: ErrStatus ", pDCTstat->ErrStatus);
2623 print_tx("SPDCalcWidth: ErrCode ", pDCTstat->ErrCode);
2624 print_t("SPDCalcWidth: Done\n");
/* After per-DCT memory sizing: fold the DCT limit into the node limit,
 * account for the software memory hole (NV_MemHole/NV_BottomIO), and in
 * unganged mode program F2x110 DctSelBaseAddr / DctSelHi routing so DCT0
 * and DCT1 address ranges are stitched into one node range. */
2630 static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
2631 struct DCTStatStruc *pDCTstat, u8 dct)
2640 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
2641 DramHoleBase = mctGet_NVbits(NV_BottomIO);
2643 /* Increase hole size so;[31:24]to[31:16]
2644 * it has granularity of 128MB shl eax,8
2645 * Set 'effective' bottom IOmov DramHoleBase,eax
2647 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2649 /* In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
2650 if (!pDCTstat->GangedMode) {
2651 dev = pDCTstat->dev_dct;
2652 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2653 /* if DCT0 and DCT1 both exist, set DctSelBaseAddr[47:27] to the top of DCT0 */
2655 if (pDCTstat->DIMMValidDCT[1] > 0) {
2656 dword = pDCTstat->DCTSysLimit + 1;
2657 dword += pDCTstat->NodeSysBase;
2658 dword >>= 8; /* scale [39:8] to [47:27],and to F2x110[31:11] */
2659 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2660 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2661 val = pMCTstat->HoleBase;
2663 val = (((~val) & 0xFF) + 1);
2668 val = Get_NB32(dev, reg);
2671 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2672 Set_NB32(dev, reg, val);
2673 print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select Low Register = ", val);
2674 print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select High Register = ", dword);
2678 Set_NB32(dev, reg, val);
2681 /* Program the DctSelBaseAddr value to 0
2682 if DCT 0 is disabled */
2683 if (pDCTstat->DIMMValidDCT[0] == 0) {
2684 dword = pDCTstat->NodeSysBase;
2686 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2687 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2688 val = pMCTstat->HoleBase;
2691 val |= (((~val) & 0xFFFF) + 1);
2696 Set_NB32(dev, reg, val);
2699 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2700 Set_NB32(dev, reg, val);
2701 print_tx("AfterStitch DCT1 only: DRAM Controller Select Low Register = ", val);
2702 print_tx("AfterStitch DCT1 only: DRAM Controller Select High Register = ", dword);
2706 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2708 print_tx("AfterStitch pDCTstat->NodeSysBase = ", pDCTstat->NodeSysBase);
2709 print_tx("mct_AfterStitchMemory: pDCTstat->NodeSysLimit ", pDCTstat->NodeSysLimit);
/* Thin wrapper around DIMMPresence_D; returns the resulting error code. */
2713 static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
2714 struct DCTStatStruc *pDCTstat, u8 dct)
2719 ret = DIMMPresence_D(pMCTstat, pDCTstat);
2721 ret = pDCTstat->ErrCode;
2727 /* mct_BeforeGetDIMMAddress inline in C */
/* Program turnaround timings (Trdrd/Twrwr/Twrrd/TrwtTO/TrwtWB) for every
 * present node; DCT1 is handled separately only when unganged. */
2730 static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
2731 struct DCTStatStruc *pDCTstatA)
2735 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
2736 struct DCTStatStruc *pDCTstat;
2737 pDCTstat = pDCTstatA + Node;
2738 if (pDCTstat->NodePresent) {
2739 if (pDCTstat->DIMMValidDCT[0]) {
2740 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
2741 Set_OtherTiming(pMCTstat, pDCTstat, 0);
2743 if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
2744 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
2745 Set_OtherTiming(pMCTstat, pDCTstat, 1);
2747 } /* Node is present*/
/* Compute the five bus-turnaround parameters for one DCT and merge them
 * into F2x[1,0]8C (DRAM Timing High). Field insertion lines are elided
 * in this extract; each dword load below feeds a bitfield of that reg. */
2752 static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
2753 struct DCTStatStruc *pDCTstat, u8 dct)
2756 u32 reg_off = 0x100 * dct;
2759 u32 dev = pDCTstat->dev_dct;
2761 Get_Trdrd(pMCTstat, pDCTstat, dct);
2762 Get_Twrwr(pMCTstat, pDCTstat, dct);
2763 Get_Twrrd(pMCTstat, pDCTstat, dct);
2764 Get_TrwtTO(pMCTstat, pDCTstat, dct);
2765 Get_TrwtWB(pMCTstat, pDCTstat);
2767 reg = 0x8C + reg_off; /* Dram Timing Hi */
2768 val = Get_NB32(dev, reg);
2770 dword = pDCTstat->TrwtTO; //0x07
2772 dword = pDCTstat->Twrrd; //0x03
2774 dword = pDCTstat->Twrwr; //0x03
2776 dword = pDCTstat->Trdrd; //0x03
2778 dword = pDCTstat->TrwtWB; //0x07
2780 val = OtherTiming_A_D(pDCTstat, val);
2781 Set_NB32(dev, reg, val);
/* Derive Trdrd (read-to-read turnaround) for one DCT from the critical
 * gross delay difference (CGDD) across DIMMs. Mixed x4/x8 populations
 * use the DqsRcvEn gross-delay spread; uniform populations compare
 * RdDqs/DqsRcvEn settings directly. Result stored in pDCTstat->Trdrd. */
2786 static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
2787 struct DCTStatStruc *pDCTstat, u8 dct)
2793 u32 index_reg = 0x98 + 0x100 * dct;
2794 u32 dev = pDCTstat->dev_dct;
2796 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0)) {
2797 /* mixed (x4 or x8) DIMM types
2798 the largest DqsRcvEnGrossDelay of any DIMM minus the DqsRcvEnGrossDelay
2799 of any other DIMM is equal to the Critical Gross Delay Difference (CGDD) for Trdrd.*/
2800 byte = Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2808 Trdrd with non-mixed DIMM types
2809 RdDqsTime are the same for all DIMMs and DqsRcvEn difference between
2810 any two DIMMs is less than half of a MEMCLK, BIOS should program Trdrd to 0000b,
2811 else BIOS should program Trdrd to 0001b.
2813 RdDqsTime are the same for all DIMMs
2814 DDR400~DDR667 only use one set register
2815 DDR800 have two set register for DIMM0 and DIMM1 */
2817 if (pDCTstat->Speed > 3) {
2818 /* DIMM0+DIMM1 exist */ //NOTE it should be 5
2819 val = bsf(pDCTstat->DIMMValid);
2820 dword = bsr(pDCTstat->DIMMValid);
2821 if (dword != val && dword != 0) {
2822 /* DCT Read DQS Timing Control - DIMM0 - Low */
2823 dword = Get_NB32_index_wait(dev, index_reg, 0x05);
2824 /* DCT Read DQS Timing Control - DIMM1 - Low */
2825 val = Get_NB32_index_wait(dev, index_reg, 0x105);
2829 /* DCT Read DQS Timing Control - DIMM0 - High */
2830 dword = Get_NB32_index_wait(dev, index_reg, 0x06);
2831 /* DCT Read DQS Timing Control - DIMM1 - High */
2832 val = Get_NB32_index_wait(dev, index_reg, 0x106);
2838 /* DqsRcvEn difference between any two DIMMs is
2839 less than half of a MEMCLK */
2840 /* DqsRcvEn byte 1,0*/
2841 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x10))
2843 /* DqsRcvEn byte 3,2*/
2844 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x11))
2846 /* DqsRcvEn byte 5,4*/
2847 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x20))
2849 /* DqsRcvEn byte 7,6*/
2850 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x21))
/* ECC lane (byte 8) check -- guard condition elided in this extract. */
2853 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x12))
2859 pDCTstat->Trdrd = Trdrd;
/* Derive Twrwr (write-to-write turnaround) from the WrDatGrossDlyByte
 * spread between DIMM0 and DIMM1 (DDR800 has per-DIMM register sets).
 * Result stored in pDCTstat->Twrwr. */
2864 static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
2865 struct DCTStatStruc *pDCTstat, u8 dct)
2868 u32 index_reg = 0x98 + 0x100 * dct;
2869 u32 dev = pDCTstat->dev_dct;
2873 /* WrDatGrossDlyByte only use one set register when DDR400~DDR667
2874 DDR800 have two set register for DIMM0 and DIMM1 */
2875 if (pDCTstat->Speed > 3) {
2876 val = bsf(pDCTstat->DIMMValid);
2877 dword = bsr(pDCTstat->DIMMValid);
2878 if (dword != val && dword != 0) {
2879 /*the largest WrDatGrossDlyByte of any DIMM minus the
2880 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
2881 val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2888 pDCTstat->Twrwr = Twrwr;
/* Derive Twrrd (write-to-read turnaround): CGDD between the largest
 * WrDatGross delay and the DqsRcvEnGross delay of any other DIMM.
 * Result stored in pDCTstat->Twrrd. */
2892 static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
2893 struct DCTStatStruc *pDCTstat, u8 dct)
2895 u8 byte, bytex, val;
2896 u32 index_reg = 0x98 + 0x100 * dct;
2897 u32 dev = pDCTstat->dev_dct;
2899 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2900 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2901 equal to the Critical Gross Delay Difference (CGDD) for Twrrd.*/
2903 /* WrDatGrossDlyByte only use one set register when DDR400~DDR667
2904 DDR800 have two set register for DIMM0 and DIMM1 */
2905 if (pDCTstat->Speed > 3) {
2906 val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2908 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 1); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
2909 pDCTstat->WrDatGrossH = (u8) val; /* low byte = max value */
2912 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2914 bytex = pDCTstat->DqsRcvEnGrossL;
2915 byte = pDCTstat->WrDatGrossH;
2925 pDCTstat->Twrrd = bytex;
/* Derive TrwtTO (read-to-write turnaround) from the DqsRcvEnGross /
 * WrDatGross delay difference. Result stored in pDCTstat->TrwtTO. */
2929 static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
2930 struct DCTStatStruc *pDCTstat, u8 dct)
2933 u32 index_reg = 0x98 + 0x100 * dct;
2934 u32 dev = pDCTstat->dev_dct;
2936 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2937 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2938 equal to the Critical Gross Delay Difference (CGDD) for TrwtTO. */
2939 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2940 Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2941 bytex = pDCTstat->DqsRcvEnGrossL;
2942 byte = pDCTstat->WrDatGrossH;
2945 if ((bytex == 1) || (bytex == 2))
2951 if ((byte == 0) || (byte == 1))
2957 pDCTstat->TrwtTO = bytex;
/* TrwtWB (read-to-write data-bus turnaround) is defined as TrwtTO + 1;
 * Get_TrwtTO() must have run first. */
2961 static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
2962 struct DCTStatStruc *pDCTstat)
2964 /* TrwtWB ensures read-to-write data-bus turnaround.
2965 This value should be one more than the programmed TrwtTO.*/
2966 pDCTstat->TrwtWB = pDCTstat->TrwtTO + 1;
/* Scan the populated even chip-selects and track min/max DqsRcvEn for
 * the two byte lanes held in one indexed register (low byte and bits
 * 23:16). Returns nonzero when either lane's spread exceeds 31 (half a
 * MEMCLK in 1/64-MEMCLK units -- presumably; confirm against BKDG). */
2970 static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat,
2971 u8 dct, u32 dev, u32 index_reg,
2974 u8 Smallest_0, Largest_0, Smallest_1, Largest_1;
2988 for (i=0; i < 8; i+=2) {
2989 if ( pDCTstat->DIMMValid & (1 << i)) {
2990 val = Get_NB32_index_wait(dev, index_reg, index);
2992 if (byte < Smallest_0)
2994 if (byte > Largest_0)
2997 byte = (val >> 16) & 0xFF;
2998 if (byte < Smallest_1)
3000 if (byte > Largest_1)
3007 /* check if total DqsRcvEn delay difference between any
3008 two DIMMs is less than half of a MEMCLK */
3009 if ((Largest_0 - Smallest_0) > 31)
3012 if ((Largest_1 - Smallest_1) > 31)
/* Compute the Critical Gross Delay Difference for DqsRcvEn across all
 * byte-lane pairs (and the ECC lane when present). Side effect: stores
 * the overall largest value in pDCTstat->DqsRcvEnGrossL. Returns
 * Largest - Smallest. */
3018 static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
3019 u32 dev, u32 index_reg)
3021 u8 Smallest, Largest;
3025 /* The largest DqsRcvEnGrossDelay of any DIMM minus the
3026 DqsRcvEnGrossDelay of any other DIMM is equal to the Critical
3027 Gross Delay Difference (CGDD) */
3028 /* DqsRcvEn byte 1,0 */
3029 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x10);
3030 Largest = val & 0xFF;
3031 Smallest = (val >> 8) & 0xFF;
3033 /* DqsRcvEn byte 3,2 */
3034 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x11);
3036 bytex = (val >> 8) & 0xFF;
3037 if (bytex < Smallest)
3042 /* DqsRcvEn byte 5,4 */
3043 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x20);
3045 bytex = (val >> 8) & 0xFF;
3046 if (bytex < Smallest)
3051 /* DqsRcvEn byte 7,6 */
3052 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x21);
3054 bytex = (val >> 8) & 0xFF;
3055 if (bytex < Smallest)
3060 if (pDCTstat->DimmECCPresent> 0) {
3062 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x12);
3064 bytex = (val >> 8) & 0xFF;
3065 if (bytex < Smallest)
3071 pDCTstat->DqsRcvEnGrossL = Largest;
3072 return Largest - Smallest;
/* Compute the WrDatGrossDlyByte CGDD across DIMM0 and DIMM1. Side
 * effect: stores the largest value in pDCTstat->WrDatGrossH. Returns
 * Largest - Smallest. */
3076 static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat,
3077 u8 dct, u32 dev, u32 index_reg)
3079 u8 Smallest, Largest;
3083 /* The largest WrDatGrossDlyByte of any DIMM minus the
3084 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
3085 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x01); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
3086 Largest = val & 0xFF;
3087 Smallest = (val >> 8) & 0xFF;
3088 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x101); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM1 */
3090 bytex = (val >> 8) & 0xFF;
3091 if (bytex < Smallest)
3096 // FIXME: Add Cx support.
3098 pDCTstat->WrDatGrossH = Largest;
3099 return Largest - Smallest;
/* For one DqsRcvEn index register, scan populated even chip-selects and
 * return the packed (Smallest << 8) | Largest of the gross-delay fields
 * (bits [7:5] of each 16-bit half, hence the >> 5 extraction). */
3102 static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
3103 u32 dev, u32 index_reg,
3106 u8 Smallest, Largest;
3119 for (i=0; i < 8; i+=2) {
3120 if ( pDCTstat->DIMMValid & (1 << i)) {
3121 val = Get_NB32_index_wait(dev, index_reg, index);
3123 byte = (val >> 5) & 0xFF;
3124 if (byte < Smallest)
3129 byte = (val >> (16 + 5)) & 0xFF;
3130 if (byte < Smallest)
/* For one WrDatGrossDlyByte index, scan two consecutive registers (four
 * byte lanes each) plus the ECC lane when present, returning the packed
 * (Smallest << 8) | Largest gross delay. */
3146 static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
3147 u8 dct, u32 dev, u32 index_reg,
3150 u8 Smallest, Largest;
3158 for (i=0; i < 2; i++) {
3159 val = Get_NB32_index_wait(dev, index_reg, index);
3162 for (j=0; j < 4; j++) {
3164 if (byte < Smallest)
3173 if (pDCTstat->DimmECCPresent > 0) {
3175 val = Get_NB32_index_wait(dev, index_reg, index);
3179 if (byte < Smallest)
/* End-of-training cleanup: undo the WbEnhWsbDis workaround set by
 * mct_InitialMCT_D. ClrClToNB is deferred until execution leaves ROM. */
3194 static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
3195 struct DCTStatStruc *pDCTstat)
3197 print_t("\tmct_FinalMCT_D: Clr Cl, Wb\n");
3200 /* ClrClToNB_D postponed until we're done executing from ROM */
3201 mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
/* Pre-training setup: apply the ClLinesToNb and WbEnhWsbDis MSR
 * workarounds required while DQS training runs (cleared in FinalMCT). */
3205 static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat)
3207 print_t("\tmct_InitialMCT_D: Set Cl, Wb\n");
3208 mct_SetClToNB_D(pMCTstat, pDCTstat);
3209 mct_SetWbEnhWsbDis_D(pMCTstat, pDCTstat);
3213 static u32 mct_NodePresent_D(void)
/* Per-node bring-up defaults, and latch whether extended PCI config
 * access is already enabled (MSR bit 46) into Status[SB_ExtConfig]. */
3221 static void mct_init(struct MCTStatStruc *pMCTstat,
3222 struct DCTStatStruc *pDCTstat)
3227 pDCTstat->GangedMode = 0;
3228 pDCTstat->DRPresent = 1;
3230 /* enable extend PCI configuration access */
3232 _RDMSR(addr, &lo, &hi);
3233 if (hi & (1 << (46-32))) {
3234 pDCTstat->Status |= 1 << SB_ExtConfig;
3237 _WRMSR(addr, lo, hi);
/* Clear the Legacy BIOS Mode bit in the DCT config register. */
3242 static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
3243 struct DCTStatStruc *pDCTstat)
3247 u32 dev = pDCTstat->dev_dct;
3249 /* Clear Legacy BIOS Mode bit */
3251 val = Get_NB32(dev, reg);
3252 val &= ~(1<<LegacyBiosMode);
3253 Set_NB32(dev, reg, val);
/* Mirror each node's DRAM base/limit from Node0's F1x40/44 map into the
 * per-node extended map F1x120/124, applying memory hoisting when the
 * software hole is active (GSB_HWHole). */
3257 static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
3258 struct DCTStatStruc *pDCTstatA)
3261 u32 Drambase, Dramlimit;
3267 struct DCTStatStruc *pDCTstat;
3269 pDCTstat = pDCTstatA + 0;
3270 dev = pDCTstat->dev_map;
3272 /* Copy dram map from F1x40/44,F1x48/4c,
3273 to F1x120/124(Node0),F1x120/124(Node1),...*/
3274 for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
3275 pDCTstat = pDCTstatA + Node;
3276 devx = pDCTstat->dev_map;
3278 /* get base/limit from Node0 */
3279 reg = 0x40 + (Node << 3); /* Node0/Dram Base 0 */
3280 val = Get_NB32(dev, reg);
3281 Drambase = val >> ( 16 + 3);
3283 reg = 0x44 + (Node << 3); /* Node0/Dram Base 0 */
3284 val = Get_NB32(dev, reg);
3285 Dramlimit = val >> (16 + 3);
3287 /* set base/limit to F1x120/124 per Node */
3288 if (pDCTstat->NodePresent) {
3289 reg = 0x120; /* F1x120,DramBase[47:27] */
3290 val = Get_NB32(devx, reg);
3293 Set_NB32(devx, reg, val);
3296 val = Get_NB32(devx, reg);
3299 Set_NB32(devx, reg, val);
3301 if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
3303 val = Get_NB32(devx, reg);
3304 val |= (1 << DramMemHoistValid);
3305 val &= ~(0xFF << 24);
3306 dword = (pMCTstat->HoleBase >> (24 - 8)) & 0xFF;
3309 Set_NB32(devx, reg, val);
/* Tri-state unused chip-select pins (when the board provides its own
 * termination). For registered DIMMs each present CS also keeps its
 * partner (cs+1) active before the mask is inverted. */
3316 static void SetCSTriState(struct MCTStatStruc *pMCTstat,
3317 struct DCTStatStruc *pDCTstat, u8 dct)
3320 u32 dev = pDCTstat->dev_dct;
3321 u32 index_reg = 0x98 + 0x100 * dct;
3326 /* Tri-state unused chipselects when motherboard
3327 termination is available */
3329 // FIXME: skip for Ax
3331 word = pDCTstat->CSPresent;
3332 if (pDCTstat->Status & (1 << SB_Registered)) {
3333 for (cs = 0; cs < 8; cs++) {
3334 if (word & (1 << cs)) {
3336 word |= 1 << (cs + 1);
3340 word = (~word) & 0xFF;
3342 val = Get_NB32_index_wait(dev, index_reg, index);
3344 Set_NB32_index_wait(dev, index_reg, index, val);
/* Tri-state unused CKE pins when board termination is available; the
 * per-CKE decision logic between the loop and the register write is
 * elided in this extract. */
3349 static void SetCKETriState(struct MCTStatStruc *pMCTstat,
3350 struct DCTStatStruc *pDCTstat, u8 dct)
3354 u32 index_reg = 0x98 + 0x100 * dct;
3359 /* Tri-state unused CKEs when motherboard termination is available */
3361 // FIXME: skip for Ax
3363 dev = pDCTstat->dev_dct;
3365 for (cs = 0; cs < 8; cs++) {
3366 if (pDCTstat->CSPresent & (1 << cs)) {
3375 val = Get_NB32_index_wait(dev, index_reg, index);
3376 if ((word & 0x00FF) == 1)
3381 if ((word >> 8) == 1)
3386 Set_NB32_index_wait(dev, index_reg, index, val);
/* Tri-state unused ODT pins: start with all four tri-stated (0x0F) and
 * clear a bit per populated CS pair; quad-rank-capable boards also clear
 * the companion pin for the odd rank. */
3390 static void SetODTTriState(struct MCTStatStruc *pMCTstat,
3391 struct DCTStatStruc *pDCTstat, u8 dct)
3395 u32 index_reg = 0x98 + 0x100 * dct;
3401 // FIXME: skip for Ax
3403 dev = pDCTstat->dev_dct;
3405 /* Tri-state unused ODTs when motherboard termination is available */
3406 max_dimms = (u8) mctGet_NVbits(NV_MAX_DIMMS);
3407 odt = 0x0F; /* tristate all the pins then clear the used ones. */
3409 for (cs = 0; cs < 8; cs += 2) {
3410 if (pDCTstat->CSPresent & (1 << cs)) {
3411 odt &= ~(1 << (cs / 2));
3413 /* if quad-rank capable platform clear additional pins */
3414 if (max_dimms != MAX_CS_SUPPORTED) {
3415 if (pDCTstat->CSPresent & (1 << (cs + 1)))
3416 odt &= ~(4 << (cs / 2));
3422 val = Get_NB32_index_wait(dev, index_reg, index);
3424 Set_NB32_index_wait(dev, index_reg, index, val);
/* Build the DRAM phy slew-rate compensation word from the rise/fall
 * lookup tables (selected by fields of indexed reg 0x00), then write it
 * to F2x[1,0]9C_x0A, with a 4-MAdimm override at DDR533/667. */
3429 static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
3430 struct DCTStatStruc *pDCTstat, u8 dct)
3433 u32 index_reg = 0x98 + 0x100 * dct;
3434 u32 dev = pDCTstat->dev_dct;
3440 val = Get_NB32_index_wait(dev, index_reg, 0x00);
3442 for (i=0; i < 6; i++) {
/* Table select per sub-field; 15x vs 20x refers to drive strength. */
3446 p = Table_Comp_Rise_Slew_15x;
3447 valx = p[(val >> 16) & 3];
3451 p = Table_Comp_Fall_Slew_15x;
3452 valx = p[(val >> 16) & 3];
3455 p = Table_Comp_Rise_Slew_20x;
3456 valx = p[(val >> 8) & 3];
3459 p = Table_Comp_Fall_Slew_20x;
3460 valx = p[(val >> 8) & 3];
3464 dword |= valx << (5 * i);
3467 /* Override/Exception */
3468 if (!pDCTstat->GangedMode) {
3469 i = 0; /* use i for the dct setting required */
3470 if (pDCTstat->MAdimms[0] < 4)
3472 if (((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) && (pDCTstat->MAdimms[i] == 4)) {
3473 dword &= 0xF18FFF18;
3474 index_reg = 0x98; /* force dct = 0 */
3478 Set_NB32_index_wait(dev, index_reg, 0x0a, dword);
3482 static void WaitRoutine_D(u32 time)
/* Set F2x78[EarlyArbEn] when the NB CLK : MemClk ratio qualifies (see
 * CheckNBCOFEarlyArbEn) to avoid arbitration bubbles at low ratios. */
3491 static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
3492 struct DCTStatStruc *pDCTstat)
3496 u32 dev = pDCTstat->dev_dct;
3498 /* GhEnhancement #18429 modified by askar: For low NB CLK :
3499 * Memclk ratio, the DCT may need to arbitrate early to avoid
3500 * unnecessary bubbles.
3501 * bit 19 of F2x[1,0]78 Dram Control Register, set this bit only when
3502 * NB CLK : Memclk ratio is between 3:1 (inclusive) to 4:5 (inclusive)
3506 val = Get_NB32(dev, reg);
3508 //FIXME: check for Cx
3509 if (CheckNBCOFEarlyArbEn(pMCTstat, pDCTstat))
3510 val |= (1 << EarlyArbEn);
3512 Set_NB32(dev, reg, val);
/* Decide whether EarlyArbEn may be set: returns 1 when the NB COF to
 * MemClk ratio is between 3:1 and 4.5:1 (inclusive), 0 otherwise.
 * Reads NBFid from MSR C001_0071 and MemClkFreq from F2x94 (falling
 * back to DCT1's copy when DCT0's MemClkFreqVal is clear). */
3517 static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
3518 struct DCTStatStruc *pDCTstat)
3524 u32 dev = pDCTstat->dev_dct;
3528 /* Check if NB COF >= 4*Memclk, if it is not, return a fatal error
3531 /* 3*(Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
3532 _RDMSR(0xC0010071, &lo, &hi);
3538 val = Get_NB32(dev, reg);
3539 if (!(val & (1 << MemClkFreqVal)))
/* BUGFIX: DCT1's register block is at +0x100, not reg * 0x100
 * (the multiply produced a bogus config offset). */
3540 val = Get_NB32(dev, reg + 0x100); /* get the DCT1 value */
3548 dev = pDCTstat->dev_nbmisc;
3550 val = Get_NB32(dev, reg);
3558 // Yes this could be nicer but this was how the asm was....
3559 if (val < 3) { /* NClk:MemClk < 3:1 */
3561 } else if (val > 4) { /* NClk:MemClk >= 5:1 */
3563 } else if ((val == 4) && (rem > tmp)) { /* NClk:MemClk > 4.5:1 */
3566 return 1; /* 3:1 <= NClk:MemClk <= 4.5:1*/
/* Zero the MCT stat struct and (a sub-range of) each node's DCT stat
 * struct, preserving the HostBiosSrvc1/2 fields across the wipe. The
 * byte-wise clears use offsetof-style bounds so persistent fields
 * outside [start,stop) survive. */
3571 static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
3572 struct DCTStatStruc *pDCTstatA)
3576 struct DCTStatStruc *pDCTstat;
3579 u16 host_serv1, host_serv2;
3581 /* Initialize Data structures by clearing all entries to 0 */
3582 p = (u8 *) pMCTstat;
3583 for (i = 0; i < sizeof(struct MCTStatStruc); i++) {
3587 for (Node = 0; Node < 8; Node++) {
3588 pDCTstat = pDCTstatA + Node;
3589 host_serv1 = pDCTstat->HostBiosSrvc1;
3590 host_serv2 = pDCTstat->HostBiosSrvc2;
3592 p = (u8 *) pDCTstat;
3594 stop = (u32)(&((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
3595 for (i = start; i < stop ; i++) {
3599 start = (u32)(&((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
3600 stop = sizeof(struct DCTStatStruc);
3601 for (i = start; i < stop; i++) {
3604 pDCTstat->HostBiosSrvc1 = host_serv1;
3605 pDCTstat->HostBiosSrvc2 = host_serv2;
/* Pre-DRAM-init phy writes required at DDR533/667 (Speed 2 or 3) on
 * both DCT register blocks; magic index/data pairs come from the
 * production BIOS sequence. */
3610 static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
3611 struct DCTStatStruc *pDCTstat)
3615 u32 dev = pDCTstat->dev_dct;
3617 // FIXME: skip for Ax
3618 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
3619 for (i=0; i < 2; i++) {
3620 reg_off = 0x100 * i;
3621 Set_NB32(dev, 0x98 + reg_off, 0x0D000030);
3622 Set_NB32(dev, 0x9C + reg_off, 0x00000806);
3623 Set_NB32(dev, 0x98 + reg_off, 0x4D040F30);
/* Adjust the DQS position search range at DDR533/667; the adjustment
 * itself is elided in this extract. */
3629 static void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
3630 struct DCTStatStruc *pDCTstat, u8 *dqs_pos)
3632 // FIXME: Skip for Ax
3633 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
/* On DA-C2/RB-C3 silicon, program the phy registers that keep the DLL
 * out of shutdown during self-refresh, and return DramConfigLo with
 * DisDllShutdownSR (bit 27) set. */
3638 static u32 mct_DisDllShutdownSR(struct MCTStatStruc *pMCTstat,
3639 struct DCTStatStruc *pDCTstat, u32 DramConfigLo, u8 dct)
3641 u32 reg_off = 0x100 * dct;
3642 u32 dev = pDCTstat->dev_dct;
3644 /* Write 0000_07D0h to register F2x[1, 0]98_x4D0FE006 */
3645 if (pDCTstat->LogicalCPUID & (AMD_DA_C2 | AMD_RB_C3)) {
3646 Set_NB32(dev, 0x9C + reg_off, 0x7D0);
3647 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE006);
3648 Set_NB32(dev, 0x9C + reg_off, 0x190);
3649 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE007);
3652 return DramConfigLo | /* DisDllShutdownSR */ 1 << 27;
/* Counterpart of mct_DisDllShutdownSR: restore the phy values on
 * DA-C2/RB-C3 and clear DisDllShutdownSR (F2x90 bit 27). */
3655 static void mct_EnDllShutdownSR(struct MCTStatStruc *pMCTstat,
3656 struct DCTStatStruc *pDCTstat, u8 dct)
3658 u32 reg_off = 0x100 * dct;
3659 u32 dev = pDCTstat->dev_dct, val;
3661 /* Write 0000_07D0h to register F2x[1, 0]98_x4D0FE006 */
3662 if (pDCTstat->LogicalCPUID & (AMD_DA_C2 | AMD_RB_C3)) {
3663 Set_NB32(dev, 0x9C + reg_off, 0x1C);
3664 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE006);
3665 Set_NB32(dev, 0x9C + reg_off, 0x13D);
3666 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE007);
3668 val = Get_NB32(dev, 0x90 + reg_off);
3669 val &= ~(1 << 27/* DisDllShutdownSR */);
3670 Set_NB32(dev, 0x90 + reg_off, val);
/* Set the ClLinesToNbDis MSR bit (disable cache lines to NB) for the
 * duration of memory training. */
3674 void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat,
3675 struct DCTStatStruc *pDCTstat)
3680 // FIXME: Maybe check the CPUID? - not for now.
3681 // pDCTstat->LogicalCPUID;
3684 _RDMSR(msr, &lo, &hi);
3685 lo |= 1 << ClLinesToNbDis;
3686 _WRMSR(msr, lo, hi);
/* Clear ClLinesToNbDis again -- unless ClToNB_flag records that the bit
 * was already set before training, in which case it is left alone. */
3690 void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
3691 struct DCTStatStruc *pDCTstat)
3697 // FIXME: Maybe check the CPUID? - not for now.
3698 // pDCTstat->LogicalCPUID;
3701 _RDMSR(msr, &lo, &hi);
3702 if (!pDCTstat->ClToNB_flag)
3703 lo &= ~(1<<ClLinesToNbDis);
3704 _WRMSR(msr, lo, hi);
/* Set the WbEnhWsbDis MSR bit (high dword) for memory training. */
3709 void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3710 struct DCTStatStruc *pDCTstat)
3715 // FIXME: Maybe check the CPUID? - not for now.
3716 // pDCTstat->LogicalCPUID;
3719 _RDMSR(msr, &lo, &hi);
3720 hi |= (1 << WbEnhWsbDis_D);
3721 _WRMSR(msr, lo, hi);
/* Clear the WbEnhWsbDis MSR bit after training completes. */
3725 void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3726 struct DCTStatStruc *pDCTstat)
3731 // FIXME: Maybe check the CPUID? - not for now.
3732 // pDCTstat->LogicalCPUID;
3735 _RDMSR(msr, &lo, &hi);
3736 hi &= ~(1 << WbEnhWsbDis_D);
3737 _WRMSR(msr, lo, hi);
/* Write DRAM Configuration High (F2x[1,0]94) with the Bug#15114 /
 * Erratum 177 wrapper: disable phy auto-compensation, reset the begin-
 * compensation bits, perform the (frequency-changing) write, then
 * re-enable auto-compensation. */
3741 void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct,
3744 /* Bug#15114: Comp. update interrupted by Freq. change can cause
3745 * subsequent update to be invalid during any MemClk frequency change:
3746 * Solution: From the bug report:
3747 * 1. A software-initiated frequency change should be wrapped into the
3748 * following sequence :
3749 * - a) Disable Compensation (F2[1, 0]9C_x08[30] )
3750 * b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
3751 * c) Do frequency change
3752 * d) Enable Compensation (F2[1, 0]9C_x08[30] )
3753 * 2. A software-initiated Disable Compensation should always be
3754 * followed by step b) of the above steps.
3755 * Silicon Status: Fixed In Rev B0
3757 * Errata#177: DRAM Phy Automatic Compensation Updates May Be Invalid
3758 * Solution: BIOS should disable the phy automatic compensation prior
3759 * to initiating a memory clock frequency change as follows:
3760 * 1. Disable PhyAutoComp by writing 1'b1 to F2x[1, 0]9C_x08[30]
3761 * 2. Reset the Begin Compensation bits by writing 32'h0 to
3762 * F2x[1, 0]9C_x4D004F00
3763 * 3. Perform frequency change
3764 * 4. Enable PhyAutoComp by writing 1'b0 to F2x[1, 0]9C_08[30]
3765 * In addition, any time software disables the automatic phy
3766 * compensation it should reset the begin compensation bit per step 2.
3767 * Silicon Status: Fixed in DR-B0
3770 u32 dev = pDCTstat->dev_dct;
3771 u32 index_reg = 0x98 + 0x100 * dct;
3777 val = Get_NB32_index_wait(dev, index_reg, index);
3778 Set_NB32_index_wait(dev, index_reg, index, val | (1 << DisAutoComp));
3780 //FIXME: check for Bx Cx CPU
3781 // if Ax mct_SetDramConfigHi_Samp_D
3784 index = 0x4D014F00; /* F2x[1, 0]9C_x[D0FFFFF:D000000] DRAM Phy Debug Registers */
3785 index |= 1 << DctAccessWrite;
3787 Set_NB32_index_wait(dev, index_reg, index, val);
3789 Set_NB32(dev, 0x94 + 0x100 * dct, DramConfigHi);
3792 val = Get_NB32_index_wait(dev, index_reg, index);
3793 Set_NB32_index_wait(dev, index_reg, index, val & (~(1 << DisAutoComp)));
/* Pre-DQS-training pass over all nodes: apply the sample workaround and
 * reset the DLL on both DCTs (Bug#15115 / Bug#15880 mitigations). */
3796 static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
3797 struct DCTStatStruc *pDCTstatA)
3800 struct DCTStatStruc *pDCTstat;
3804 * Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
3806 * Solution: BIOS should program DRAM Control Register[RdPtrInit] =
3807 * 5h, (F2x[1, 0]78[3:0] = 5h).
3808 * Silicon Status: Fixed In Rev B0
3810 * Bug#15880: Determine validity of reset settings for DDR PHY timing.
3811 * Solution: At least, set WrDqs fine delay to be 0 for DDR2 training.
3814 for (Node = 0; Node < 8; Node++) {
3815 pDCTstat = pDCTstatA + Node;
3817 if (pDCTstat->NodePresent) {
3818 mct_BeforeDQSTrain_Samp_D(pMCTstat, pDCTstat);
3819 mct_ResetDLL_D(pMCTstat, pDCTstat, 0);
3820 mct_ResetDLL_D(pMCTstat, pDCTstat, 1);
/* Pulse the DLL reset (phy reg x4D080F0C) for each enabled receiver
 * pair on one DCT, priming each with a dummy read first. HWCR.wrap32dis
 * is temporarily set so 64-bit memory references work from 32-bit mode.
 * Skipped entirely on B3 silicon. */
3825 static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
3826 struct DCTStatStruc *pDCTstat, u8 dct)
3829 u32 dev = pDCTstat->dev_dct;
3830 u32 reg_off = 0x100 * dct;
3836 /* Skip reset DLL for B3 */
3837 if (pDCTstat->LogicalCPUID & AMD_DR_B3) {
3842 _RDMSR(addr, &lo, &hi);
3843 if(lo & (1<<17)) { /* save the old value */
3846 lo |= (1<<17); /* HWCR.wrap32dis */
3847 lo &= ~(1<<15); /* SSEDIS */
3848 /* Setting wrap32dis allows 64-bit memory references in 32bit mode */
3849 _WRMSR(addr, lo, hi);
3852 pDCTstat->Channel = dct;
3853 Receiver = mct_InitReceiver_D(pDCTstat, dct);
3854 /* there are four receiver pairs, loosely associated with chipselects.*/
3855 for (; Receiver < 8; Receiver += 2) {
3856 if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver)) {
3857 addr = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, dct, Receiver, &valid);
3859 mct_Read1LTestPattern_D(pMCTstat, pDCTstat, addr); /* cache fills */
3861 /* Write 0000_8000h to register F2x[1,0]9C_xD080F0C */
3862 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00008000);
3863 mct_Wait(80); /* wait >= 300ns */
3865 /* Write 0000_0000h to register F2x[1,0]9C_xD080F0C */
3866 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00000000);
3867 mct_Wait(800); /* wait >= 2us */
3874 _RDMSR(addr, &lo, &hi);
3875 lo &= ~(1<<17); /* restore HWCR.wrap32dis */
3876 _WRMSR(addr, lo, hi);
/* In unganged mode, enable channel data interleaving (F2x110 bit 5) and
 * set NB Config Hi DisDatMask (bit 36, i.e. bit 4 of the high dword). */
3881 static void mct_EnableDatIntlv_D(struct MCTStatStruc *pMCTstat,
3882 struct DCTStatStruc *pDCTstat)
3884 u32 dev = pDCTstat->dev_dct;
3887 /* Enable F2x110[DctDatIntlv] */
3888 // Call back not required mctHookBeforeDatIntlv_D()
3889 // FIXME Skip for Ax
3890 if (!pDCTstat->GangedMode) {
3891 val = Get_NB32(dev, 0x110);
3892 val |= 1 << 5; // DctDatIntlv
3893 Set_NB32(dev, 0x110, val);
3895 // FIXME Skip for Cx
3896 dev = pDCTstat->dev_nbmisc;
3897 val = Get_NB32(dev, 0x8C); // NB Configuration Hi
3898 val |= 1 << (36-32); // DisDatMask
3899 Set_NB32(dev, 0x8C, val);
/* Set F2x78[ChSetupSync] when exactly one channel has all-zero
 * AddrCmdSetup/CsOdtSetup/CkeSetup fields (mask 0x0202020) while the
 * other has at least one nonzero setup. */
3904 static void mct_SetupSync_D(struct MCTStatStruc *pMCTstat,
3905 struct DCTStatStruc *pDCTstat)
3907 /* set F2x78[ChSetupSync] when F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup,
3908 * CkeSetup] setups for one DCT are all 0s and at least one of the setups,
3909 * F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup, CkeSetup], of the other
3913 u32 dev = pDCTstat->dev_dct;
3916 cha = pDCTstat->CH_ADDR_TMG[0] & 0x0202020;
3917 chb = pDCTstat->CH_ADDR_TMG[1] & 0x0202020;
3919 if ((cha != chb) && ((cha == 0) || (chb == 0))) {
3920 val = Get_NB32(dev, 0x78);
3922 Set_NB32(dev, 0x78, val);
/* Post-DRAM-init workaround for rev B2/B3: wait ~50us; if DramEnabled is
 * STILL clear, toggle Width128 off, do a dummy phy CSR read, and (in
 * ganged mode) turn Width128 back on to kick the controller. */
3927 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
3930 u32 reg_off = 0x100 * dct;
3931 u32 dev = pDCTstat->dev_dct;
3933 if (pDCTstat->LogicalCPUID & (AMD_DR_B2 | AMD_DR_B3)) {
3934 mct_Wait(10000); /* Wait 50 us*/
3935 val = Get_NB32(dev, 0x110);
/* BUGFIX: workaround must trigger when DramEnabled is still 0 after the
 * 50 us wait (the prior unnegated test matched the opposite case and
 * contradicted the comment below). */
3936 if (!(val & (1 << DramEnabled))) {
3937 /* If 50 us expires while DramEnable =0 then do the following */
3938 val = Get_NB32(dev, 0x90 + reg_off);
3939 val &= ~(1 << Width128); /* Program Width128 = 0 */
3940 Set_NB32(dev, 0x90 + reg_off, val);
3942 val = Get_NB32_index_wait(dev, 0x98 + reg_off, 0x05); /* Perform dummy CSR read to F2x09C_x05 */
3944 if (pDCTstat->GangedMode) {
3945 val = Get_NB32(dev, 0x90 + reg_off);
3946 val |= 1 << Width128; /* Restore Width128 = 1 (previous comment wrongly said 0) */
3947 Set_NB32(dev, 0x90 + reg_off, val);
3954 /* ==========================================================
3955 * 6-bit Bank Addressing Table
3958 * CCC=Columns-9 binary
3959 * ==========================================================
3960 * DCT CCCBRR Rows Banks Columns 64-bit CS Size
3962 * 0000 000000 13 2 9 128MB
3963 * 0001 001000 13 2 10 256MB
3964 * 0010 001001 14 2 10 512MB
3965 * 0011 010000 13 2 11 512MB
3966 * 0100 001100 13 3 10 512MB
3967 * 0101 001101 14 3 10 1GB
3968 * 0110 010001 14 2 11 1GB
3969 * 0111 001110 15 3 10 2GB
3970 * 1000 010101 14 3 11 2GB
3971 * 1001 010110 15 3 11 4GB
3972 * 1010 001111 16 3 10 4GB
3973 * 1011 010111 16 3 11 8GB