2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 /* Description: Main memory controller system configuration for DDR 2 */
23 /* KNOWN ISSUES - ERRATA
25 * Trtp is not calculated correctly when the controller is in 64-bit mode, it
26 * is 1 busclock off. No fix planned. The controller is not ordinarily in
29 * 32 Byte burst not supported. No fix planned. The controller is not
30 * ordinarily in 64-bit mode.
32 * Trc precision does not use extra Jedec defined fractional component.
33 * Instead, Trc (coarse) is rounded up to the nearest 1 ns.
35 * Mini and Micro DIMM not supported. Only RDIMM, UDIMM, SO-DIMM defined types
39 static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
40 struct DCTStatStruc *pDCTstatA);
41 static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
42 struct DCTStatStruc *pDCTstatA);
43 static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
44 struct DCTStatStruc *pDCTstatA);
45 static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
46 struct DCTStatStruc *pDCTstatA);
47 static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
48 struct DCTStatStruc *pDCTstatA);
49 static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
50 struct DCTStatStruc *pDCTstatA);
51 static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
52 struct DCTStatStruc *pDCTstat);
53 static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
54 struct DCTStatStruc *pDCTstat);
55 static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
56 struct DCTStatStruc *pDCTstatA);
57 static u8 NodePresent_D(u8 Node);
58 static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
59 struct DCTStatStruc *pDCTstatA);
60 static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
61 struct DCTStatStruc *pDCTstat, u8 dct);
62 static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
63 struct DCTStatStruc *pDCTstat, u8 dct);
64 static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
65 struct DCTStatStruc *pDCTstat, u8 dct);
66 static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
67 struct DCTStatStruc *pDCTstat);
68 static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
69 struct DCTStatStruc *pDCTstat, u8 dct);
70 static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
71 struct DCTStatStruc *pDCTstat, u8 dct);
72 static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
73 struct DCTStatStruc *pDCTstat, u8 dct);
74 static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
75 struct DCTStatStruc *pDCTstat, u8 dct);
76 static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
77 struct DCTStatStruc *pDCTstat, u8 dct);
78 static u8 Get_DefTrc_k_D(u8 k);
79 static u16 Get_40Tk_D(u8 k);
80 static u16 Get_Fk_D(u8 k);
81 static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat, u8 i, u8 j, u8 k);
82 static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
83 struct DCTStatStruc *pDCTstat, int j, int k);
84 static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i);
85 static void mct_initDCT(struct MCTStatStruc *pMCTstat,
86 struct DCTStatStruc *pDCTstat);
87 static void mct_DramInit(struct MCTStatStruc *pMCTstat,
88 struct DCTStatStruc *pDCTstat, u8 dct);
89 static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
90 struct DCTStatStruc *pDCTstat, u8 dct);
91 static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat);
92 static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
93 struct DCTStatStruc *pDCTstat, u8 dct);
94 static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
95 struct DCTStatStruc *pDCTstat, u8 dct);
/* Stray line-continuation backslash removed: the '\' at the end of the first
 * line of this prototype spliced the two lines together. It was harmless to
 * the compiler but inconsistent with every other prototype in this file. */
96 static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
97 struct DCTStatStruc *pDCTstat, u8 dct);
98 static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
99 struct DCTStatStruc *pDCTstat, u8 dct);
100 static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
101 struct DCTStatStruc *pDCTstat, u8 dct);
102 static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
103 struct DCTStatStruc *pDCTstat, u8 dct);
104 static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
105 struct DCTStatStruc *pDCTstat, u8 dct);
106 static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
107 struct DCTStatStruc *pDCTstat, u8 dct);
108 static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
109 struct DCTStatStruc *pDCTstat, u8 dct);
110 static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
111 struct DCTStatStruc *pDCTstat);
112 static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
113 u32 dev, u32 index_reg, u32 index);
114 static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
115 u32 dev, u32 index_reg);
116 static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
117 u32 dev, u32 index_reg);
118 static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
119 u32 dev, u32 index_reg, u32 index);
120 static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
121 struct DCTStatStruc *pDCTstat);
122 static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat, u8 dct,
123 u32 dev, u32 index_reg, u32 index);
124 static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat,
125 struct DCTStatStruc *pDCTstat);
126 static void mct_init(struct MCTStatStruc *pMCTstat,
127 struct DCTStatStruc *pDCTstat);
128 static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
129 struct DCTStatStruc *pDCTstat);
130 static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
131 struct DCTStatStruc *pDCTstatA);
132 static void SetCSTriState(struct MCTStatStruc *pMCTstat,
133 struct DCTStatStruc *pDCTstat, u8 dct);
134 static void SetODTTriState(struct MCTStatStruc *pMCTstat,
135 struct DCTStatStruc *pDCTstat, u8 dct);
136 static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
137 struct DCTStatStruc *pDCTstat, u8 dct);
138 static u32 mct_NodePresent_D(void);
139 static void WaitRoutine_D(u32 time);
140 static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
141 struct DCTStatStruc *pDCTstatA);
142 static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
143 struct DCTStatStruc *pDCTstatA);
144 static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
145 struct DCTStatStruc *pDCTstat);
146 static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
147 struct DCTStatStruc *pDCTstat);
148 void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
149 struct DCTStatStruc *pDCTstat);
150 static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
151 struct DCTStatStruc *pDCTstat);
152 void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
153 struct DCTStatStruc *pDCTstat);
154 static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
155 struct DCTStatStruc *pDCTstatA);
156 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct);
157 static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
158 struct DCTStatStruc *pDCTstat, u8 dct);
161 /*See mctAutoInitMCT header for index relationships to CL and T*/
/* Lookup tables indexed by the memclock-speed index k (0..5) or the CAS
 * latency index j. Values preserved verbatim. Semantics below are inferred
 * from names and arithmetic — confirm against the AMD BKDG and the
 * mctAutoInitMCT header referenced above. */
162 static const u16 Table_F_k[] = {00,200,266,333,400,533 }; /* bus clock in MHz per speed index k (0 = invalid/off) */
163 static const u8 Table_T_k[] = {0x00,0x50,0x3D,0x30,0x25, 0x18 }; /* per-k cycle-time encoding — TODO confirm units against header */
164 static const u8 Table_CL2_j[] = {0x04,0x08,0x10,0x20,0x40, 0x80 }; /* one-hot SPD CAS-latency mask per CL index j */
165 static const u8 Tab_defTrc_k[] = {0x0,0x41,0x3C,0x3C,0x3A, 0x3A }; /* default Trc per speed k; used when SPD Trc is 0/0xFF (see Get_DefTrc_k_D) */
166 static const u16 Tab_40T_k[] = {00,200,150,120,100,75 }; /* 40 x cycle time in ns per speed k (e.g. 200MHz: 40*5ns = 200) */
167 static const u8 Tab_TrefT_k[] = {00,0,1,1,2,2,3,4,5,6,0,0}; /* Tref encoding per index — verify against BKDG */
168 static const u8 Tab_BankAddr[] = {0x0,0x08,0x09,0x10,0x0C,0x0D,0x11,0x0E,0x15,0x16,0x0F,0x17}; /* DRAM bank-address-mapping register encodings */
169 static const u8 Tab_tCL_j[] = {0,2,3,4,5}; /* CAS latency value per CL index j */
170 static const u8 Tab_1KTfawT_k[] = {00,8,10,13,14,20}; /* Tfaw (busclocks) per speed k, 1KB page devices */
171 static const u8 Tab_2KTfawT_k[] = {00,10,14,17,18,24}; /* Tfaw (busclocks) per speed k, 2KB page devices */
172 static const u8 Tab_L1CLKDis[] = {8,8,6,4,2,0,8,8}; /* MemClkDis patterns — presumably per DIMM-population case; verify */
173 static const u8 Tab_M2CLKDis[] = {2,0,8,8,2,0,2,0};
174 static const u8 Tab_S1CLKDis[] = {8,0,8,8,8,0,8,0};
175 static const u8 Table_Comp_Rise_Slew_20x[] = {7, 3, 2, 2, 0xFF}; /* phy compensation slew-rate tables; 0xFF = terminator */
176 static const u8 Table_Comp_Rise_Slew_15x[] = {7, 7, 3, 2, 0xFF};
177 static const u8 Table_Comp_Fall_Slew_20x[] = {7, 5, 3, 2, 0xFF};
178 static const u8 Table_Comp_Fall_Slew_15x[] = {7, 7, 5, 3, 0xFF};
/*
 * Top-level DDR2 memory-controller auto-initialization entry point: per-node
 * setup (device handles, presence check, DCT init), then system-wide phases
 * (DCT sync, HT memory map, CPU/UMA memory typing, DQS training, other
 * timings, interleaving, ECC init and memory clear).
 * NOTE(review): the embedded original line numbers below are non-contiguous —
 * lines are missing from this chunk (declarations, braces, else-arms). Code
 * is preserved byte-for-byte; do not treat the visible brace structure as
 * complete.
 */
180 static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
181 			struct DCTStatStruc *pDCTstatA)
184  * Memory may be mapped contiguously all the way up to 4GB (depending
185  * on setup options). It is the responsibility of PCI subsystem to
186  * create an uncacheable IO region below 4GB and to adjust TOP_MEM
187  * downward prior to any IO mapping or accesses. It is the same
188  * responsibility of the CPU sub-system prior to accessing LAPIC.
190  * Slot Number is an external convention, and is determined by OEM with
191  * accompanying silk screening. OEM may choose to use Slot number
192  * convention which is consistent with DIMM number conventions.
193  * All AMD engineering
196  * Run-Time Requirements:
197  * 1. Complete Hypertransport Bus Configuration
198  * 2. SMBus Controller Initialized
199  * 3. Checksummed or Valid NVRAM bits
200  * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
201  * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to
203  * 6. All var MTRRs reset to zero
204  * 7. State of NB_CFG.DisDatMsk set properly on all CPUs
205  * 8. All CPUs at 2Ghz Speed (unless DQS training is not installed).
206  * 9. All cHT links at max Speed/Width (unless DQS training is not
210  * Global relationship between index values and item values:
212  * --------------------------
224 	mctInitMemGPIOs_A_D();		/* Set any required GPIOs*/
	/* Per-node loop: cache PCI device handles and probe/initialize each node. */
227 	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
228 		struct DCTStatStruc *pDCTstat;
229 		pDCTstat = pDCTstatA + Node;
230 		pDCTstat->Node_ID = Node;
231 		pDCTstat->dev_host = PA_HOST(Node);
232 		pDCTstat->dev_map = PA_MAP(Node);
233 		pDCTstat->dev_dct = PA_DCT(Node);
234 		pDCTstat->dev_nbmisc = PA_NBMISC(Node);
235 		pDCTstat->NodeSysBase = node_sys_base;
237 		print_tx("mctAutoInitMCT_D: mct_init Node ", Node);
238 		mct_init(pMCTstat, pDCTstat);
239 		mctNodeIDDebugPort_D();
240 		pDCTstat->NodePresent = NodePresent_D(Node);
241 		if (pDCTstat->NodePresent) {		/* See if Node is there*/
242 			print_t("mctAutoInitMCT_D: clear_legacy_Mode\n");
243 			clear_legacy_Mode(pMCTstat, pDCTstat);
244 			pDCTstat->LogicalCPUID = mctGetLogicalCPUID_D(Node);
246 			print_t("mctAutoInitMCT_D: mct_InitialMCT_D\n");
247 			mct_InitialMCT_D(pMCTstat, pDCTstat);
249 			print_t("mctAutoInitMCT_D: mctSMBhub_Init\n");
250 			mctSMBhub_Init(Node);		/* Switch SMBUS crossbar to proper node*/
252 			print_t("mctAutoInitMCT_D: mct_initDCT\n");
253 			mct_initDCT(pMCTstat, pDCTstat);
254 			if (pDCTstat->ErrCode == SC_FatalErr) {
255 				goto fatalexit;		/* any fatal errors?*/
256 			} else if (pDCTstat->ErrCode < SC_StopError) {
259 		}	/* if Node present */
		/* Next node's base: current node's base plus its 16MB-aligned limit. */
260 		node_sys_base = pDCTstat->NodeSysBase;
261 		node_sys_base += (pDCTstat->NodeSysLimit + 2) & ~0x0F;
263 	if (NodesWmem == 0) {
264 		print_debug("No Nodes?!\n");
	/* System-wide phases run once all per-node init has been kicked off. */
268 	print_t("mctAutoInitMCT_D: SyncDCTsReady_D\n");
269 	SyncDCTsReady_D(pMCTstat, pDCTstatA);	/* Make sure DCTs are ready for accesses.*/
271 	print_t("mctAutoInitMCT_D: HTMemMapInit_D\n");
272 	HTMemMapInit_D(pMCTstat, pDCTstatA);	/* Map local memory into system address space.*/
275 	print_t("mctAutoInitMCT_D: CPUMemTyping_D\n");
276 	CPUMemTyping_D(pMCTstat, pDCTstatA);	/* Map dram into WB/UC CPU cacheability */
277 	mctHookAfterCPU();			/* Setup external northbridge(s) */
279 	print_t("mctAutoInitMCT_D: DQSTiming_D\n");
280 	DQSTiming_D(pMCTstat, pDCTstatA);	/* Get Receiver Enable and DQS signal timing*/
282 	print_t("mctAutoInitMCT_D: UMAMemTyping_D\n");
283 	UMAMemTyping_D(pMCTstat, pDCTstatA);	/* Fix up for UMA sizing */
285 	print_t("mctAutoInitMCT_D: :OtherTiming\n");
286 	mct_OtherTiming(pMCTstat, pDCTstatA);
288 	if (ReconfigureDIMMspare_D(pMCTstat, pDCTstatA)) { /* RESET# if 1st pass of DIMM spare enabled*/
292 	InterleaveNodes_D(pMCTstat, pDCTstatA);
293 	InterleaveChannels_D(pMCTstat, pDCTstatA);
295 	print_t("mctAutoInitMCT_D: ECCInit_D\n");
296 	if (ECCInit_D(pMCTstat, pDCTstatA)) {		/* Setup ECC control and ECC check-bits*/
297 		print_t("mctAutoInitMCT_D: MCTMemClr_D\n");
298 		MCTMemClr_D(pMCTstat,pDCTstatA);
301 	mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) );	// Node 0
302 	print_t("All Done\n");
306 	die("mct_d: fatalexit");
/*
 * Reconfigure the DIMM-spare feature after the first init pass. When the
 * no-warm-reset variant is compiled in, the second pass loads the saved DQS
 * timing registers; otherwise the data structures are reset and the
 * GSB_EnDIMMSpareNW flag is set so the caller re-runs init.
 * Return value (presumably "reset required") not visible here.
 * NOTE(review): embedded line numbers skip — lines (returns, braces, else
 * arms) are missing from this chunk; code preserved byte-for-byte.
 */
310 static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
311 					struct DCTStatStruc *pDCTstatA)
315 	if (mctGet_NVbits(NV_CS_SpareCTL)) {
316 		if (MCT_DIMM_SPARE_NO_WARM) {
317 			/* Do no warm-reset DIMM spare */
318 			if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
319 				LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);
322 				mct_ResetDataStruct_D(pMCTstat, pDCTstatA);
323 				pMCTstat->GStatus |= 1 << GSB_EnDIMMSpareNW;
327 			/* Do warm-reset DIMM spare */
328 			if (mctGet_NVbits(NV_DQSTrainCTL))
/*
 * Run DQS/receiver-enable signal training for all nodes, or, when training
 * is disabled via NVRAM (NV_DQSTrainCTL == 0), restore previously saved
 * timing values into the registers instead. Either path ends with a memory
 * clear (MCTMemClr_D).
 * NOTE(review): embedded line numbers skip — this chunk is missing lines;
 * code preserved byte-for-byte.
 */
342 static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
343 				struct DCTStatStruc *pDCTstatA)
347 	if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
350 	nv_DQSTrainCTL = mctGet_NVbits(NV_DQSTrainCTL);
351 	/* FIXME: BOZO- DQS training every time*/
354 	print_t("DQSTiming_D: mct_BeforeDQSTrain_D:\n");
355 	mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);
356 	phyAssistedMemFnceTraining(pMCTstat, pDCTstatA);
358 	if (nv_DQSTrainCTL) {
359 		mctHookBeforeAnyTraining(pMCTstat, pDCTstatA);
361 		print_t("DQSTiming_D: TrainReceiverEn_D FirstPass:\n");
362 		TrainReceiverEn_D(pMCTstat, pDCTstatA, FirstPass);
364 		print_t("DQSTiming_D: mct_TrainDQSPos_D\n");
365 		mct_TrainDQSPos_D(pMCTstat, pDCTstatA);
367 		// Second Pass never used for Barcelona!
368 		//print_t("DQSTiming_D: TrainReceiverEn_D SecondPass:\n");
369 		//TrainReceiverEn_D(pMCTstat, pDCTstatA, SecondPass);
371 		print_t("DQSTiming_D: mctSetEccDQSRcvrEn_D\n");
372 		mctSetEccDQSRcvrEn_D(pMCTstat, pDCTstatA);
374 		print_t("DQSTiming_D: TrainMaxReadLatency_D\n");
375 		//FIXME - currently uses calculated value	TrainMaxReadLatency_D(pMCTstat, pDCTstatA);
376 		mctHookAfterAnyTraining();
377 		mctSaveDQSSigTmg_D();
379 		print_t("DQSTiming_D: mct_EndDQSTraining_D\n");
380 		mct_EndDQSTraining_D(pMCTstat, pDCTstatA);
382 		print_t("DQSTiming_D: MCTMemClr_D\n");
383 		MCTMemClr_D(pMCTstat, pDCTstatA);
		/* No-training path: restore saved DQS timings instead of training. */
385 		mctGetDQSSigTmg_D();	/* get values into data structure */
386 		LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);	/* load values into registers.*/
387 		//mctDoWarmResetMemClr_D();
388 		MCTMemClr_D(pMCTstat, pDCTstatA);
/*
 * Write previously captured DQS signal-timing values into the DCT registers
 * of every node with memory: receiver-enable delays per receiver pair, ECC
 * DQS receiver enables, read/write data timing per channel/DIMM, and finally
 * MaxRdLatency (with the DqsRcvEnTrain bit cleared to leave training mode).
 * NOTE(review): embedded line numbers skip — missing lines in this chunk;
 * code preserved byte-for-byte.
 */
393 static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
394 					struct DCTStatStruc *pDCTstatA)
396 	u8 Node, Receiver, Channel, Dir, DIMM;
404 	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
405 		struct DCTStatStruc *pDCTstat;
406 		pDCTstat = pDCTstatA + Node;
		/* DCTSysLimit != 0 means this node has mapped memory. */
408 		if (pDCTstat->DCTSysLimit) {
409 			dev = pDCTstat->dev_dct;
410 			for (Channel = 0;Channel < 2; Channel++) {
411 				/* there are four receiver pairs,
412 				   loosely associated with chipselects.*/
413 				index_reg = 0x98 + Channel * 0x100;
414 				for (Receiver = 0; Receiver < 8; Receiver += 2) {
415 					/* Set Receiver Enable Values */
416 					mct_SetRcvrEnDly_D(pDCTstat,
418 						1, /* FinalValue, From stack */
422 						(Receiver >> 1) * 3 + 0x10, /* Addl_Index */
423 						2); /* Pass Second Pass ? */
427 			for (Channel = 0; Channel<2; Channel++) {
428 				SetEccDQSRcvrEn_D(pDCTstat, Channel);
431 			for (Channel = 0; Channel < 2; Channel++) {
433 				index_reg = 0x98 + Channel * 0x100;
436 				 * when 400, 533, 667, it will support dimm0/1/2/3,
437 				 * and set conf for dimm0, hw will copy to dimm1/2/3
438 				 * set for dimm1, hw will copy to dimm3
439 				 * Rev A/B only support DIMM0/1 when 800Mhz and above
440 				 *   + 0x100 to next dimm
441 				 * Rev C support DIMM0/1/2/3 when 800Mhz and above
442 				 *   + 0x100 to next dimm
444 				for (DIMM = 0; DIMM < 2; DIMM++) {
446 						index = 0;	/* CHA Write Data Timing Low */
						/* Speed >= 4 (800MHz+): per-DIMM register banks, 0x100 apart. */
448 						if (pDCTstat->Speed >= 4) {
449 							index = 0x100 * DIMM;
454 					for (Dir=0;Dir<2;Dir++) {//RD/WR
455 						p = pDCTstat->CH_D_DIR_B_DQS[Channel][DIMM][Dir];
456 						val = stream_to_int(p); /* CHA Read Data Timing High */
457 						Set_NB32_index_wait(dev, index_reg, index+1, val);
458 						val = stream_to_int(p+4); /* CHA Write Data Timing High */
459 						Set_NB32_index_wait(dev, index_reg, index+2, val);
460 						val = *(p+8); /* CHA Write ECC Timing */
461 						Set_NB32_index_wait(dev, index_reg, index+3, val);
467 			for (Channel = 0; Channel<2; Channel++) {
468 				reg = 0x78 + Channel * 0x100;
469 				val = Get_NB32(dev, reg);
471 				val |= ((u32) pDCTstat->CH_MaxRdLat[Channel] << 22);
472 				val &= ~(1<<DqsRcvEnTrain);
473 				Set_NB32(dev, reg, val);	/* program MaxRdLatency to correspond with current delay*/
/*
 * Zero the MCA NB status registers (MC4_STS aliases at F3x48/F3x4C) on every
 * present node, clearing stale machine-check state left by cold reset,
 * shutdown, or ECC conditioning.
 * NOTE(review): embedded line numbers skip — local declarations are missing
 * from this chunk; code preserved byte-for-byte.
 */
480 static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
481 				struct DCTStatStruc *pDCTstatA)
483 	/* Clear MC4_STS for all Nodes in the system. This is required in some
484 	 * circumstances to clear left over garbage from cold reset, shutdown,
485 	 * or normal ECC memory conditioning.
488 	//FIXME: this function depends on pDCTstat Array ( with Node id ) - Is this really a problem?
493 	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
494 		struct DCTStatStruc *pDCTstat;
495 		pDCTstat = pDCTstatA + Node;
497 		if (pDCTstat->NodePresent) {
498 			dev = pDCTstat->dev_nbmisc;
499 			/*MCA NB Status Low (alias to MC4_STS[31:0] */
500 			Set_NB32(dev, 0x48, 0);
501 			/* MCA NB Status High (alias to MC4_STS[63:32] */
502 			Set_NB32(dev, 0x4C, 0);
/*
 * Program the HyperTransport DRAM address map: walk all nodes, assign each
 * node's DRAM base/limit (F1x40/F1x44 pairs), insert the memory hole below
 * 4GB (hardware hoisting via F1xF0 when supported, else a software hole),
 * then replicate node 0's map to the other present nodes and to the
 * extended map (F1x120/124).
 * NOTE(review): embedded line numbers skip — declarations, else-arms and
 * braces are missing from this chunk; code preserved byte-for-byte. Base and
 * limit here are right-justified [39:8] addresses (see _4GB_RJ8).
 */
508 static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
509 				struct DCTStatStruc *pDCTstatA)
512 	u32 NextBase, BottomIO;
513 	u8 _MemHoleRemap, DramHoleBase, DramHoleOffset;
514 	u32 HoleSize, DramSelBaseAddr;
520 	struct DCTStatStruc *pDCTstat;
522 	_MemHoleRemap = mctGet_NVbits(NV_MemHole);
	/* Hole base comes from NVRAM unless a hole was already established. */
524 	if (pMCTstat->HoleBase == 0) {
525 		DramHoleBase = mctGet_NVbits(NV_BottomIO);
527 		DramHoleBase = pMCTstat->HoleBase >> (24-8);
530 	BottomIO = DramHoleBase << (24-8);
533 	pDCTstat = pDCTstatA + 0;
534 	dev = pDCTstat->dev_map;
537 	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
538 		pDCTstat = pDCTstatA + Node;
539 		devx = pDCTstat->dev_map;
541 		pDCTstat = pDCTstatA + Node;
542 		if (!pDCTstat->GangedMode) {
543 			DramSelBaseAddr = pDCTstat->NodeSysLimit - pDCTstat->DCTSysLimit;
544 			/*In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
545 			val = pDCTstat->NodeSysLimit;
546 			if ((val & 0xFF) == 0xFE) {
550 			pDCTstat->DCTSysLimit = val;
553 		base  = pDCTstat->DCTSysBase;
554 		limit = pDCTstat->DCTSysLimit;
558 			DramSelBaseAddr += NextBase;
559 			printk(BIOS_DEBUG, " Node: %02x  base: %02x  limit: %02x  BottomIO: %02x\n", Node, base, limit, BottomIO);
			/* Node's range straddles the hole: hoist memory above BottomIO. */
562 			if ((base < BottomIO) && (limit >= BottomIO)) {
564 					pDCTstat->Status |= 1 << SB_HWHole;
565 					pMCTstat->GStatus |= 1 << GSB_HWHole;
566 					pDCTstat->DCTSysBase = base;
567 					pDCTstat->DCTSysLimit = limit;
568 					pDCTstat->DCTHoleBase = BottomIO;
569 					pMCTstat->HoleBase = BottomIO;
570 					HoleSize = _4GB_RJ8 - BottomIO; /* HoleSize[39:8] */
571 					if ((DramSelBaseAddr > 0) && (DramSelBaseAddr < BottomIO))
572 						base = DramSelBaseAddr;
573 					val = ((base + HoleSize) >> (24-8)) & 0xFF;
574 					DramHoleOffset = val;
575 					val <<= 8; /* shl 16, rol 24 */
576 					val |= DramHoleBase << 24;
577 					val |= 1  << DramHoleValid;
578 					Set_NB32(devx, 0xF0, val); /* Dram Hole Address Reg */
579 					pDCTstat->DCTSysLimit += HoleSize;
580 					base = pDCTstat->DCTSysBase;
581 					limit = pDCTstat->DCTSysLimit;
582 			} else if (base == BottomIO) {
				/* Node starts exactly at the hole: software hole. */
584 				pMCTstat->GStatus |= 1<<GSB_SpIntRemapHole;
585 				pDCTstat->Status |= 1<<SB_SWNodeHole;
586 				pMCTstat->GStatus |= 1<<GSB_SoftHole;
587 				pMCTstat->HoleBase = base;
591 				pDCTstat->DCTSysBase = base;
592 				pDCTstat->DCTSysLimit = limit;
594 				/* No Remapping.  Normal Contiguous mapping */
595 				pDCTstat->DCTSysBase = base;
596 				pDCTstat->DCTSysLimit = limit;
599 			/*No Remapping. Normal Contiguous mapping*/
600 			pDCTstat->DCTSysBase = base;
601 			pDCTstat->DCTSysLimit = limit;
603 		base |= 3;		/* set WE,RE fields*/
604 		pMCTstat->SysLimit = limit;
606 		Set_NB32(dev, 0x40 + (Node << 3), base); /* [Node] + Dram Base 0 */
608 		/* if Node limit > 1GB then set it to 1GB boundary for each node */
609 		if ((mctSetNodeBoundary_D()) && (limit > 0x00400000)) {
614 		val = limit & 0xFFFF0000;
616 		Set_NB32(dev, 0x44 + (Node << 3), val);	/* set DstNode */
618 		limit = pDCTstat->DCTSysLimit;
620 			NextBase = (limit & 0xFFFF0000) + 0x10000;
621 			if ((mctSetNodeBoundary_D()) && (NextBase > 0x00400000)) {
623 				NextBase &= 0xFFC00000;
629 	/* Copy dram map from Node 0 to Node 1-7 */
630 	for (Node = 1; Node < MAX_NODES_SUPPORTED; Node++) {
632 		pDCTstat = pDCTstatA + Node;
633 		devx = pDCTstat->dev_map;
635 		if (pDCTstat->NodePresent) {
636 			printk(BIOS_DEBUG, " Copy dram map from Node 0 to Node %02x \n", Node);
637 			reg = 0x40;		/*Dram Base 0*/
639 				val = Get_NB32(dev, reg);
640 				Set_NB32(devx, reg, val);
642 			} while ( reg < 0x80);
644 			break;			/* stop at first absent Node */
648 	/*Copy dram map to F1x120/124*/
649 	mct_HTMemMapExt(pMCTstat, pDCTstatA);
/*
 * Start a hardware memory clear on every present node (in parallel), then
 * wait for each to complete. Skipped when DQS training is disabled (a warm
 * reset based clear is expected via the wrapper instead).
 * NOTE(review): embedded line numbers skip — closing braces are missing from
 * this chunk; code preserved byte-for-byte.
 */
653 static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
654 				struct DCTStatStruc *pDCTstatA)
657 	/* Initiates a memory clear operation for all nodes. The mem clr
658 	 * is done in parallel. After the memclr is complete, all processors
659 	 * status are checked to ensure that memclr has completed.
662 	struct DCTStatStruc *pDCTstat;
664 	if (!mctGet_NVbits(NV_DQSTrainCTL)){
665 		// FIXME: callback to wrapper: mctDoWarmResetMemClr_D
666 	} else {	// NV_DQSTrainCTL == 1
		/* First pass: kick off the clear on every node... */
667 		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
668 			pDCTstat = pDCTstatA + Node;
670 			if (pDCTstat->NodePresent) {
671 				DCTMemClr_Init_D(pMCTstat, pDCTstat);
		/* ...second pass: wait for each node to finish. */
674 		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
675 			pDCTstat = pDCTstatA + Node;
677 			if (pDCTstat->NodePresent) {
678 				DCTMemClr_Sync_D(pMCTstat, pDCTstat);
/*
 * Kick off the hardware memory-clear engine on one node: wait until the
 * engine is idle (MemClrBusy clear), then set MemClrInit. Does not wait for
 * completion — DCTMemClr_Sync_D does that.
 * NOTE(review): embedded line numbers skip — declarations and the do-loop
 * head are missing from this chunk; code preserved byte-for-byte.
 */
685 static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
686 				struct DCTStatStruc *pDCTstat)
692 	/* Initiates a memory clear operation on one node */
693 	if (pDCTstat->DCTSysLimit) {
694 		dev = pDCTstat->dev_dct;
698 			val = Get_NB32(dev, reg);
699 		} while (val & (1 << MemClrBusy));
701 		val |= (1 << MemClrInit);
702 		Set_NB32(dev, reg, val);
/*
 * Wait for the memory clear to complete on every present node (per-node wait
 * in DCTMemClr_Sync_D). No-op when DQS training is disabled, mirroring
 * MCTMemClr_D.
 * NOTE(review): embedded line numbers skip — closing braces are missing from
 * this chunk; code preserved byte-for-byte.
 */
708 static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
709 				struct DCTStatStruc *pDCTstatA)
711 	/* Ensures that memory clear has completed on all node.*/
713 	struct DCTStatStruc *pDCTstat;
715 	if (!mctGet_NVbits(NV_DQSTrainCTL)){
716 		// callback to wrapper: mctDoWarmResetMemClr_D
717 	} else {	// NV_DQSTrainCTL == 1
718 		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
719 			pDCTstat = pDCTstatA + Node;
721 			if (pDCTstat->NodePresent) {
722 				DCTMemClr_Sync_D(pMCTstat, pDCTstat);
/*
 * Block until one node's hardware memory clear finishes: poll MemClrBusy
 * until clear, then poll Dr_MemClrStatus until set. Afterwards program the
 * DRAM controller misc register (0x11C) with the BKDG-recommended value plus
 * FlushWrOnStpGnt for S3 support.
 * NOTE(review): embedded line numbers skip — the do-loop heads are missing
 * from this chunk; code preserved byte-for-byte.
 */
729 static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
730 				struct DCTStatStruc *pDCTstat)
733 	u32 dev = pDCTstat->dev_dct;
736 	/* Ensure that a memory clear operation has completed on one node */
737 	if (pDCTstat->DCTSysLimit){
741 			val = Get_NB32(dev, reg);
742 		} while (val & (1 << MemClrBusy));
745 			val = Get_NB32(dev, reg);
746 		} while (!(val & (1 << Dr_MemClrStatus)));
749 	val = 0x0FE40FC0;		// BKDG recommended
750 	val |= MCCH_FlushWrOnStpGnt;	// Set for S3
751 	Set_NB32(dev, 0x11C, val);
/*
 * Probe whether a CPU node exists: read the vendor/device ID at the node's
 * host bridge and compare with the expected ID from mct_NodePresent_D(),
 * then (after an OEM hook) cross-check the Node ID register (0x60).
 * Returns a presence flag via 'ret' — exact return path not visible here.
 * NOTE(review): embedded line numbers skip — declarations and returns are
 * missing from this chunk; code preserved byte-for-byte.
 */
755 static u8 NodePresent_D(u8 Node)
758 	 * Determine if a single Hammer Node exists within the network.
766 	dev = PA_HOST(Node);		/*test device/vendor id at host bridge  */
767 	val = Get_NB32(dev, 0);
768 	dword = mct_NodePresent_D();	/* FIXME: BOZO -11001022h rev for F */
769 	if (val == dword) {		/* AMD Hammer Family CPU HT Configuration */
770 		if (oemNodePresent_D(Node, &ret))
772 		/* Node ID register */
773 		val = Get_NB32(dev, 0x60);
776 		if (val == dword)	/* current nodeID = requested nodeID ? */
/*
 * Initialize one DRAM controller (DCT) on a node: clear registers, then run
 * the init pipeline (DIMM presence -> SPD width -> cycle timing -> auto
 * config -> platform spec -> startup), each stage gated on the previous one
 * not raising a stop-level error. On failure (tail of function), the DRAM
 * interface is disabled and MemClkDis bits are set for power savings.
 * NOTE(review): embedded line numbers skip — closing braces and the
 * success/failure branch structure are incomplete in this chunk; code
 * preserved byte-for-byte.
 */
786 static void DCTInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct)
789 	 * Initialize DRAM on single Athlon 64/Opteron Node.
795 	ClearDCT_D(pMCTstat, pDCTstat, dct);
796 	stopDCTflag = 1;		/*preload flag with 'disable' */
797 	if (mct_DIMMPresence(pMCTstat, pDCTstat, dct) < SC_StopError) {
798 		print_t("\t\tDCTInit_D: mct_DIMMPresence Done\n");
799 		if (mct_SPDCalcWidth(pMCTstat, pDCTstat, dct) < SC_StopError) {
800 			print_t("\t\tDCTInit_D: mct_SPDCalcWidth Done\n");
801 			if (AutoCycTiming_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
802 				print_t("\t\tDCTInit_D: AutoCycTiming_D Done\n");
803 				if (AutoConfig_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
804 					print_t("\t\tDCTInit_D: AutoConfig_D Done\n");
805 					if (PlatformSpec_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
806 						print_t("\t\tDCTInit_D: PlatformSpec_D Done\n");
808 						if (!(pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW))) {
809 							print_t("\t\tDCTInit_D: StartupDCT_D\n");
810 							StartupDCT_D(pMCTstat, pDCTstat, dct);   /*yeaahhh! */
		/* Failure path: disable the DRAM interface for this DCT. */
818 		u32 reg_off = dct * 0x100;
819 		val = 1<<DisDramInterface;
820 		Set_NB32(pDCTstat->dev_dct, reg_off+0x94, val);
821 		/*To maximize power savings when DisDramInterface=1b,
822 		  all of the MemClkDis bits should also be set.*/
824 		Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
/*
 * Wait for every node's DCT to become ready for DRAM accesses by delegating
 * the per-node poll to mct_SyncDCTsReady().
 * NOTE(review): embedded line numbers skip — lines are missing from this
 * chunk; code preserved byte-for-byte.
 */
829 static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
830 				struct DCTStatStruc *pDCTstatA)
832 	/* Wait (and block further access to dram) for all DCTs to be ready,
833 	 * by polling all InitDram bits and waiting for possible memory clear
834 	 * operations to be complete.  Read MemClkFreqVal bit to see if
835 	 * the DIMMs are present in this node.
840 	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
841 		struct DCTStatStruc *pDCTstat;
842 		pDCTstat = pDCTstatA + Node;
843 		mct_SyncDCTsReady(pDCTstat);
/*
 * Start DRAM initialization on one DCT: if MemClkFreqVal indicates DIMMs are
 * present, optionally enable DqsRcvEnTrain mode (when DQS training is
 * enabled in NVRAM), then fire mct_DramInit and the surrounding hooks.
 * Fire-and-forget — completion is synchronized elsewhere.
 * NOTE(review): embedded line numbers skip — declarations and some braces
 * are missing from this chunk; code preserved byte-for-byte.
 */
848 static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
849 				struct DCTStatStruc *pDCTstat, u8 dct)
851 	/* Read MemClkFreqVal bit to see if the DIMMs are present in this node.
852 	 * If the DIMMs are present then set the DRAM Enable bit for this node.
854 	 * Setting dram init starts up the DCT state machine, initializes the
855 	 * dram devices with MRS commands, and kicks off any
856 	 * HW memory clear process that the chip is capable of. The sooner
857 	 * that dram init is set for all nodes, the faster the memory system
858 	 * initialization can complete. Thus, the init loop is unrolled into
859 	 * two loops so as to start the processes for non BSP nodes sooner.
860 	 * This procedure will not wait for the process to finish.
861 	 * Synchronization is handled elsewhere.
868 	u32 reg_off = dct * 0x100;
870 	dev = pDCTstat->dev_dct;
871 	val = Get_NB32(dev, 0x94 + reg_off);
872 	if (val & (1<<MemClkFreqVal)) {
873 		print_t("\t\t\tStartupDCT_D: MemClkFreqVal\n");
874 		byte = mctGet_NVbits(NV_DQSTrainCTL);
876 			/* Enable DQSRcvEn training mode */
877 			print_t("\t\t\tStartupDCT_D: DqsRcvEnTrain set \n");
878 			reg = 0x78 + reg_off;
879 			val = Get_NB32(dev, reg);
880 			/* Setting this bit forces a 1T window with hard left
881 			 * pass/fail edge and a probabilistic right pass/fail
882 			 * edge.  LEFT edge is referenced for final
883 			 * receiver enable position.*/
884 			val |= 1 << DqsRcvEnTrain;
885 			Set_NB32(dev, reg, val);
887 		mctHookBeforeDramInit();	/* generalized Hook */
888 		print_t("\t\t\tStartupDCT_D: DramInit \n");
889 		mct_DramInit(pMCTstat, pDCTstat, dct);
890 		AfterDramInit_D(pDCTstat, dct);
891 		mctHookAfterDramInit();		/* generalized Hook*/
/*
 * Zero this DCT's register block (F2x40 upward). The end offset depends on
 * whether the no-warm-reset DIMM-spare pass is active (0x78 vs 0xA4), which
 * presumably preserves the already-trained timing registers on that pass —
 * confirm against the spare-handling flow. A final write goes through
 * dev_map.
 * NOTE(review): embedded line numbers skip — declarations, loop increment
 * and else-arm are missing from this chunk; code preserved byte-for-byte.
 */
896 static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
897 				struct DCTStatStruc *pDCTstat, u8 dct)
900 	u32 dev = pDCTstat->dev_dct;
901 	u32 reg = 0x40 + 0x100 * dct;
904 	if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
905 		reg_end = 0x78 + 0x100 * dct;
907 		reg_end = 0xA4 + 0x100 * dct;
910 	while(reg < reg_end) {
911 		Set_NB32(dev, reg, val);
916 	dev = pDCTstat->dev_map;
918 	Set_NB32(dev, reg, val);
922 static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
923 struct DCTStatStruc *pDCTstat, u8 dct)
925 /* Initialize DCT Timing registers as per DIMM SPD.
926 * For primary timing (T, CL) use best case T value.
927 * For secondary timing params., use most aggressive settings
930 * There are three components to determining "maximum frequency":
931 * SPD component, Bus load component, and "Preset" max frequency
934 * The SPD component is a function of the min cycle time specified
935 * by each DIMM, and the interaction of cycle times from all DIMMs
936 * in conjunction with CAS latency. The SPD component only applies
937 * when user timing mode is 'Auto'.
939 * The Bus load component is a limiting factor determined by electrical
940 * characteristics on the bus as a result of varying number of device
941 * loads. The Bus load component is specific to each platform but may
942 * also be a function of other factors. The bus load component only
943 * applies when user timing mode is 'Auto'.
945 * The Preset component is subdivided into three items and is the
946 * minimum of the set: Silicon revision, user limit setting when user
947 * timing mode is 'Auto' and memclock mode is 'Limit', OEM build
948 * specification of the maximum frequency. The Preset component is only
949 * applies when user timing mode is 'Auto'.
954 u8 Trp, Trrd, Trcd, Tras, Trc, Trfc[4], Rows;
955 u32 DramTimingLo, DramTimingHi;
968 /* Get primary timing (CAS Latency and Cycle Time) */
969 if (pDCTstat->Speed == 0) {
970 mctGet_MaxLoadFreq(pDCTstat);
972 /* and Factor in presets (setup options, Si cap, etc.) */
973 GetPresetmaxF_D(pMCTstat, pDCTstat);
975 /* Go get best T and CL as specified by DIMM mfgs. and OEM */
976 SPDGetTCL_D(pMCTstat, pDCTstat, dct);
977 /* skip callback mctForce800to1067_D */
978 pDCTstat->Speed = pDCTstat->DIMMAutoSpeed;
979 pDCTstat->CASL = pDCTstat->DIMMCASL;
981 /* if "manual" memclock mode */
982 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
983 pDCTstat->Speed = mctGet_NVbits(NV_MemCkVal) + 1;
986 mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
988 /* Gather all DIMM mini-max values for cycle timing data */
998 for (i=0; i < 4; i++)
1001 for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
1003 if (pDCTstat->DIMMValid & (1 << i)) {
1004 smbaddr = Get_DIMMAddress_D(pDCTstat, dct + i);
1005 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1007 Rows = byte; /* keep track of largest row sz */
1009 byte = mctRead_SPD(smbaddr, SPD_TRP);
1013 byte = mctRead_SPD(smbaddr, SPD_TRRD);
1017 byte = mctRead_SPD(smbaddr, SPD_TRCD);
1021 byte = mctRead_SPD(smbaddr, SPD_TRTP);
1025 byte = mctRead_SPD(smbaddr, SPD_TWR);
1029 byte = mctRead_SPD(smbaddr, SPD_TWTR);
1033 val = mctRead_SPD(smbaddr, SPD_TRC);
1034 if ((val == 0) || (val == 0xFF)) {
1035 pDCTstat->ErrStatus |= 1<<SB_NoTrcTrfc;
1036 pDCTstat->ErrCode = SC_VarianceErr;
1037 val = Get_DefTrc_k_D(pDCTstat->Speed);
1039 byte = mctRead_SPD(smbaddr, SPD_TRCRFC);
1041 val++; /* round up in case fractional extention is non-zero.*/
1047 /* dev density=rank size/#devs per rank */
1048 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1050 val = ((byte >> 5) | (byte << 3)) & 0xFF;
1053 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE; /* dev density=2^(rows+columns+banks) */
1056 } else if (byte == 8) {
1058 } else if (byte == 16) {
1064 if (Trfc[LDIMM] < byte)
1067 byte = mctRead_SPD(smbaddr, SPD_TRAS);
1070 } /* Dimm Present */
1073 /* Convert DRAM CycleTiming values and store into DCT structure */
1075 byte = pDCTstat->Speed;
1078 Tk40 = Get_40Tk_D(byte);
1082 1. All secondary time values given in SPDs are in binary with units of ns.
1083 2. Some time values are scaled by four, in order to have least count of 0.25 ns
1084 (more accuracy). JEDEC SPD spec. shows which ones are x1 and x4.
1085 3. Internally to this SW, cycle time, Tk, is scaled by 10 to affect a
1086 least count of 0.1 ns (more accuracy).
1087 4. SPD values not scaled are multiplied by 10 and then divided by 10T to find
1088 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).
1089 5. SPD values that are prescaled by 4 are multiplied by 10 and then divided by 40T to find
1090 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).*/
1094 pDCTstat->DIMMTras = (u16)dword;
1096 if (dword % Tk40) { /* round up number of busclocks */
1100 if (val < Min_TrasT_1066)
1101 val = Min_TrasT_1066;
1102 else if (val > Max_TrasT_1066)
1103 val = Max_TrasT_1066;
1105 if (val < Min_TrasT)
1107 else if (val > Max_TrasT)
1110 pDCTstat->Tras = val;
1114 pDCTstat->DIMMTrp = dword;
1116 if (dword % Tk40) { /* round up number of busclocks */
1120 if (val < Min_TrasT_1066)
1121 val = Min_TrpT_1066;
1122 else if (val > Max_TrpT_1066)
1123 val = Max_TrpT_1066;
1127 else if (val > Max_TrpT)
1130 pDCTstat->Trp = val;
1134 pDCTstat->DIMMTrrd = dword;
1136 if (dword % Tk40) { /* round up number of busclocks */
1140 if (val < Min_TrrdT_1066)
1141 val = Min_TrrdT_1066;
1142 else if (val > Max_TrrdT_1066)
1143 val = Max_TrrdT_1066;
1145 if (val < Min_TrrdT)
1147 else if (val > Max_TrrdT)
1150 pDCTstat->Trrd = val;
1154 pDCTstat->DIMMTrcd = dword;
1156 if (dword % Tk40) { /* round up number of busclocks */
1160 if (val < Min_TrcdT_1066)
1161 val = Min_TrcdT_1066;
1162 else if (val > Max_TrcdT_1066)
1163 val = Max_TrcdT_1066;
1165 if (val < Min_TrcdT)
1167 else if (val > Max_TrcdT)
1170 pDCTstat->Trcd = val;
1174 pDCTstat->DIMMTrc = dword;
1176 if (dword % Tk40) { /* round up number of busclocks */
1180 if (val < Min_TrcT_1066)
1181 val = Min_TrcT_1066;
1182 else if (val > Max_TrcT_1066)
1183 val = Max_TrcT_1066;
1187 else if (val > Max_TrcT)
1190 pDCTstat->Trc = val;
1194 pDCTstat->DIMMTrtp = dword;
1195 val = pDCTstat->Speed;
1196 if (val <= 2) { /* 7.75ns / Speed in ns to get clock # */
1197 val = 2; /* for DDR400/DDR533 */
1198 } else { /* Note a speed of 3 will be a Trtp of 3 */
1199 val = 3; /* for DDR667/DDR800/DDR1066 */
1201 pDCTstat->Trtp = val;
1205 pDCTstat->DIMMTwr = dword;
1207 if (dword % Tk40) { /* round up number of busclocks */
1211 if (val < Min_TwrT_1066)
1212 val = Min_TwrT_1066;
1213 else if (val > Max_TwrT_1066)
1214 val = Max_TwrT_1066;
1218 else if (val > Max_TwrT)
1221 pDCTstat->Twr = val;
1225 pDCTstat->DIMMTwtr = dword;
1227 if (dword % Tk40) { /* round up number of busclocks */
1231 if (val < Min_TwrT_1066)
1232 val = Min_TwtrT_1066;
1233 else if (val > Max_TwtrT_1066)
1234 val = Max_TwtrT_1066;
1236 if (val < Min_TwtrT)
1238 else if (val > Max_TwtrT)
1241 pDCTstat->Twtr = val;
1246 pDCTstat->Trfc[i] = Trfc[i];
1248 mctAdjustAutoCycTmg_D();
1250 /* Program DRAM Timing values */
1251 DramTimingLo = 0; /* Dram Timing Low init */
1252 val = pDCTstat->CASL;
1253 val = Tab_tCL_j[val];
1254 DramTimingLo |= val;
1256 val = pDCTstat->Trcd;
1258 val -= Bias_TrcdT_1066;
1262 DramTimingLo |= val<<4;
1264 val = pDCTstat->Trp;
1266 val -= Bias_TrpT_1066;
1271 DramTimingLo |= val<<7;
1273 val = pDCTstat->Trtp;
1275 DramTimingLo |= val<<11;
1277 val = pDCTstat->Tras;
1279 val -= Bias_TrasT_1066;
1282 DramTimingLo |= val<<12;
1284 val = pDCTstat->Trc;
1286 DramTimingLo |= val<<16;
1289 val = pDCTstat->Twr;
1291 DramTimingLo |= val<<20;
1294 val = pDCTstat->Trrd;
1296 val -= Bias_TrrdT_1066;
1299 DramTimingLo |= val<<22;
1302 DramTimingHi = 0; /* Dram Timing Low init */
1303 val = pDCTstat->Twtr;
1305 val -= Bias_TwtrT_1066;
1308 DramTimingHi |= val<<8;
1311 DramTimingHi |= val<<16;
1318 DramTimingHi |= val << 20;
1321 dev = pDCTstat->dev_dct;
1322 reg_off = 0x100 * dct;
1323 print_tx("AutoCycTiming: DramTimingLo ", DramTimingLo);
1324 print_tx("AutoCycTiming: DramTimingHi ", DramTimingHi);
1326 Set_NB32(dev, 0x88 + reg_off, DramTimingLo); /*DCT Timing Low*/
1327 DramTimingHi |=0x0000FC77;
1328 Set_NB32(dev, 0x8c + reg_off, DramTimingHi); /*DCT Timing Hi*/
1332 dword = pDCTstat->Twr;
1333 dword -= Bias_TwrT_1066;
1335 reg = 0x84 + reg_off;
1336 val = Get_NB32(dev, reg);
1339 Set_NB32(dev, reg, val);
1341 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1343 print_tx("AutoCycTiming: Status ", pDCTstat->Status);
1344 print_tx("AutoCycTiming: ErrStatus ", pDCTstat->ErrStatus);
1345 print_tx("AutoCycTiming: ErrCode ", pDCTstat->ErrCode);
1346 print_t("AutoCycTiming: Done\n");
1348 mctHookAfterAutoCycTmg();
1350 return pDCTstat->ErrCode;
1354 static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
1355 struct DCTStatStruc *pDCTstat)
1357 /* Get max frequency from OEM platform definition, from any user
1358 * override (limiting) of max frequency, and from any Si Revision
1359 * Specific information. Return the least of these three in
1360 * DCTStatStruc.PresetmaxFreq.
1366 /* Get CPU Si Revision defined limit (NPT) */
1367 proposedFreq = 533; /* Rev F0 programmable max memclock is */
1369 /*Get User defined limit if "limit" mode */
1370 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
1371 word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
1372 if (word < proposedFreq)
1373 proposedFreq = word;
1375 /* Get Platform defined limit */
1376 word = mctGet_NVbits(NV_MAX_MEMCLK);
1377 if (word < proposedFreq)
1378 proposedFreq = word;
1380 word = pDCTstat->PresetmaxFreq;
1381 if (word > proposedFreq)
1382 word = proposedFreq;
1384 pDCTstat->PresetmaxFreq = word;
1390 static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
1391 struct DCTStatStruc *pDCTstat, u8 dct)
1393 /* Find the best T and CL primary timing parameter pair, per Mfg.,
1394 * for the given set of DIMMs, and store into DCTStatStruc
1395 * (.DIMMAutoSpeed and .DIMMCASL). See "Global relationship between
1396 * index values and item values" for definition of CAS latency
1397 * index (j) and Frequency index (k).
1402 /* i={0..7} (std. physical DIMM number)
1403 * j is an integer which enumerates increasing CAS latency.
1404 * k is an integer which enumerates decreasing cycle time.
1405 * CL no. {0,1,2} corresponds to CL X, CL X-.5, or CL X-1 (per individual DIMM)
1406 * Max timing values are per parameter, of all DIMMs, spec'd in ns like the SPD.
/* Scan from the fastest cycle time (largest k) downward, and for each
 * speed from the lowest CAS latency (j) upward; the first (j,k) pair that
 * both the platform (Sys_Capability_D) and every populated DIMM support
 * wins.  NOTE(review): the updates of T1min/CL1min and the inner-loop
 * break occur on lines not shown in this view. */
1411 for (k=K_MAX; k >= K_MIN; k--) {
1412 for (j = J_MIN; j <= J_MAX; j++) {
1413 if (Sys_Capability_D(pMCTstat, pDCTstat, j, k) ) {
1414 /* 1. check to see if DIMMi is populated.
1415 2. check if DIMMi supports CLj and Tjk */
1416 for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
1417 if (pDCTstat->DIMMValid & (1 << i)) {
1418 if (Dimm_Supports_D(pDCTstat, i, j, k))
/* Loop index reaching MAX_DIMMS_SUPPORTED means no populated DIMM
 * rejected this (j,k) pair (the early exit is in hidden lines). */
1422 if (i == MAX_DIMMS_SUPPORTED) {
/* A workable pair was found: commit it as the DIMM-limited speed/CL. */
1432 if (T1min != 0xFF) {
1433 pDCTstat->DIMMCASL = CL1min; /*mfg. optimized */
1434 pDCTstat->DIMMAutoSpeed = T1min;
1435 print_tx("SPDGetTCL_D: DIMMCASL ", pDCTstat->DIMMCASL);
1436 print_tx("SPDGetTCL_D: DIMMAutoSpeed ", pDCTstat->DIMMAutoSpeed);
/* No compatible pair: fall back to failsafe defaults and flag that the
 * controller is running in minimum mode with a variance error. */
1439 pDCTstat->DIMMCASL = CL_DEF; /* failsafe values (running in min. mode) */
1440 pDCTstat->DIMMAutoSpeed = T_DEF;
1441 pDCTstat->ErrStatus |= 1 << SB_DimmMismatchT;
1442 pDCTstat->ErrStatus |= 1 << SB_MinimumMode;
1443 pDCTstat->ErrCode = SC_VarianceErr;
1445 print_tx("SPDGetTCL_D: Status ", pDCTstat->Status);
1446 print_tx("SPDGetTCL_D: ErrStatus ", pDCTstat->ErrStatus);
1447 print_tx("SPDGetTCL_D: ErrCode ", pDCTstat->ErrCode);
1448 print_t("SPDGetTCL_D: Done\n");
1452 static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
1453 struct DCTStatStruc *pDCTstat, u8 dct)
1459 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, dct);
1461 if (pDCTstat->GangedMode) {
1462 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1);
1465 if ( pDCTstat->_2Tmode == 2) {
1466 dev = pDCTstat->dev_dct;
1467 reg = 0x94 + 0x100 * dct; /* Dram Configuration Hi */
1468 val = Get_NB32(dev, reg);
1469 val |= 1 << 20; /* 2T CMD mode */
1470 Set_NB32(dev, reg, val);
1473 mct_PlatformSpec(pMCTstat, pDCTstat, dct);
1474 InitPhyCompensation(pMCTstat, pDCTstat, dct);
1475 mctHookAfterPSCfg();
1476 return pDCTstat->ErrCode;
1480 static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
1481 struct DCTStatStruc *pDCTstat, u8 dct)
/* Build and program the DCT's Dram Control (F2x78), Dram Timing Low
 * (F2x88), Dram Config Lo/Hi (F2x90/94) and Dram Config Misc/Misc2
 * (F2xA0/A8) register values for channel 'dct' from the detected DIMM
 * population.  Returns pDCTstat->ErrCode.
 * NOTE(review): this view of the file omits interleaved lines (locals,
 * closing braces, and the zero-initialization of DramConfigLo/Hi/Misc);
 * comments below describe only what the visible code shows. */
1483 u32 DramControl, DramTimingLo, Status;
1484 u32 DramConfigLo, DramConfigHi, DramConfigMisc, DramConfigMisc2;
1492 print_tx("AutoConfig_D: DCT: ", dct);
1497 DramConfigMisc2 = 0;
1499 /* set bank addessing and Masks, plus CS pops */
1500 SPDSetBanks_D(pMCTstat, pDCTstat, dct);
1501 if (pDCTstat->ErrCode == SC_StopError)
1502 goto AutoConfig_exit;
1504 /* map chip-selects into local address space */
1505 StitchMemory_D(pMCTstat, pDCTstat, dct);
1506 InterleaveBanks_D(pMCTstat, pDCTstat, dct);
1508 /* temp image of status (for convenience). RO usage! */
1509 Status = pDCTstat->Status;
1511 dev = pDCTstat->dev_dct;
1512 reg_off = 0x100 * dct;
1515 /* Build Dram Control Register Value */
1516 DramConfigMisc2 = Get_NB32 (dev, 0xA8 + reg_off); /* Dram Control*/
1517 DramControl = Get_NB32 (dev, 0x78 + reg_off); /* Dram Control*/
1519 if (mctGet_NVbits(NV_CLKHZAltVidC3))
1520 DramControl |= 1<<16;
1522 // FIXME: Add support(skip) for Ax and Cx versions
1523 DramControl |= 5; /* RdPtrInit */
1526 /* Build Dram Config Lo Register Value */
/* ODT selection: default to 75 Ohms; drop to 50 Ohms for heavily loaded
 * or faster configurations (8-slot boards, quad-rank DIMMs, multi-DIMM
 * channels).  Speed encodings 3/4 denote the higher memclk rates. */
1527 DramConfigLo |= 1 << 4; /* 75 Ohms ODT */
1528 if (mctGet_NVbits(NV_MAX_DIMMS) == 8) {
1529 if (pDCTstat->Speed == 3) {
1530 if ((pDCTstat->MAdimms[dct] == 4))
1531 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1532 } else if (pDCTstat->Speed == 4){
1533 if ((pDCTstat->MAdimms[dct] != 1))
1534 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1537 // FIXME: Skip for Ax versions
1538 if ((pDCTstat->MAdimms[dct] == 4)) {
1539 if ( pDCTstat->DimmQRPresent != 0) {
1540 if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 4)) {
1541 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
/* NOTE(review): this else-if repeats the MAdimms==4 test seen at the
 * branch above; the hidden brace nesting determines whether it attaches
 * to the DimmQRPresent test (reachable) or the outer if (dead code) —
 * confirm against the original source / BKDG ODT tables. */
1543 } else if ((pDCTstat->MAdimms[dct] == 4)) {
1544 if (pDCTstat->Speed == 4) {
1545 if ( pDCTstat->DimmQRPresent != 0) {
1546 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1550 } else if ((pDCTstat->MAdimms[dct] == 2)) {
1551 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1556 // FIXME: Skip for Ax versions
1557 /* callback not required - if (!mctParityControl_D()) */
/* Address parity: enable only when every DIMM reported parity support. */
1558 if (Status & (1 << SB_PARDIMMs)) {
1559 DramConfigLo |= 1 << ParEn;
1560 DramConfigMisc2 |= 1 << ActiveCmdAtRst;
1562 DramConfigLo &= ~(1 << ParEn);
1563 DramConfigMisc2 &= ~(1 << ActiveCmdAtRst);
1566 if (mctGet_NVbits(NV_BurstLen32)) {
1567 if (!pDCTstat->GangedMode)
1568 DramConfigLo |= 1 << BurstLength32;
1571 if (Status & (1 << SB_128bitmode))
1572 DramConfigLo |= 1 << Width128; /* 128-bit mode (normal) */
/* Propagate per-DIMM x4-device flags into the X4Dimm[3:0] field; the
 * word/dword loop housekeeping lives on lines not shown here. */
1577 if (pDCTstat->Dimmx4Present & (1 << word))
1578 DramConfigLo |= 1 << dword; /* X4Dimm[3:0] */
1584 if (!(Status & (1 << SB_Registered)))
1585 DramConfigLo |= 1 << UnBuffDimm; /* Unbufferd DIMMs */
1587 if (mctGet_NVbits(NV_ECC_CAP))
1588 if (Status & (1 << SB_ECCDIMMs))
1589 if ( mctGet_NVbits(NV_ECC))
1590 DramConfigLo |= 1 << DimmEcEn;
1594 /* Build Dram Config Hi Register Value */
1595 dword = pDCTstat->Speed;
1596 DramConfigHi |= dword - 1; /* get MemClk encoding */
1597 DramConfigHi |= 1 << MemClkFreqVal;
1599 if (Status & (1 << SB_Registered))
/* NOTE(review): the comment says "x8 Registered DIMMs" but the test
 * requires BOTH x4 and x8 parts to be present — verify intent against
 * the BKDG RDqsEn definition. */
1600 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0))
1601 /* set only if x8 Registered DIMMs in System*/
1602 DramConfigHi |= 1 << RDqsEn;
1604 if (mctGet_NVbits(NV_CKE_PDEN)) {
1605 DramConfigHi |= 1 << 15; /* PowerDownEn */
1606 if (mctGet_NVbits(NV_CKE_CTL))
1607 /*Chip Select control of CKE*/
1608 DramConfigHi |= 1 << 16;
1611 /* Control Bank Swizzle */
1612 if (0) /* call back not needed mctBankSwizzleControl_D()) */
1613 DramConfigHi &= ~(1 << BankSwizzleMode);
1615 DramConfigHi |= 1 << BankSwizzleMode; /* recommended setting (default) */
1617 /* Check for Quadrank DIMM presence */
1618 if ( pDCTstat->DimmQRPresent != 0) {
1619 byte = mctGet_NVbits(NV_4RANKType);
1621 DramConfigHi |= 1 << 17; /* S4 (4-Rank SO-DIMMs) */
1623 DramConfigHi |= 1 << 18; /* R4 (4-Rank Registered DIMMs) */
1626 if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
1627 val = mctGet_NVbits(NV_BYPMAX);
1629 val = 0x0f; // recommended setting (default)
1630 DramConfigHi |= val << 24;
/* Tfaw selection depends on page size (1K vs 2K) and memclk speed. */
1632 val = pDCTstat->DIMM2Kpage;
1633 if (pDCTstat->GangedMode != 0) {
1641 val = Tab_2KTfawT_k[pDCTstat->Speed];
1643 val = Tab_1KTfawT_k[pDCTstat->Speed];
1645 if (pDCTstat->Speed == 5)
1650 DramConfigHi |= val; /* Tfaw for 1K or 2K paged drams */
1652 // FIXME: Skip for Ax versions
1653 DramConfigHi |= 1 << DcqArbBypassEn;
1656 /* Build MemClkDis Value from Dram Timing Lo and
1657 Dram Config Misc Registers
1658 1. We will assume that MemClkDis field has been preset prior to this
1660 2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
1661 NV_AllMemClks <>0 AND SB_DiagClks ==0 */
1664 /* Dram Timing Low (owns Clock Enable bits) */
1665 DramTimingLo = Get_NB32(dev, 0x88 + reg_off);
1666 if (mctGet_NVbits(NV_AllMemClks) == 0) {
1667 /* Special Jedec SPD diagnostic bit - "enable all clocks" */
1668 if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
/* Walk a package-type-specific DIMM-to-clock map (selected below) and
 * gate the clock of every DIMM slot that is not populated. */
1670 byte = mctGet_NVbits(NV_PACK_TYPE);
1673 else if (byte == PT_M2)
1679 while(dword < MAX_DIMMS_SUPPORTED) {
1681 print_tx("DramTimingLo: val=", val);
1682 if (!(pDCTstat->DIMMValid & (1<<val)))
1684 DramTimingLo |= 1<<(dword+24);
1690 print_tx("AutoConfig_D: DramControl: ", DramControl);
1691 print_tx("AutoConfig_D: DramTimingLo: ", DramTimingLo);
1692 print_tx("AutoConfig_D: DramConfigMisc: ", DramConfigMisc);
1693 print_tx("AutoConfig_D: DramConfigMisc2: ", DramConfigMisc2);
1694 print_tx("AutoConfig_D: DramConfigLo: ", DramConfigLo);
1695 print_tx("AutoConfig_D: DramConfigHi: ", DramConfigHi);
1697 /* Write Values to the registers */
1698 Set_NB32(dev, 0x78 + reg_off, DramControl);
1699 Set_NB32(dev, 0x88 + reg_off, DramTimingLo);
1700 Set_NB32(dev, 0xA0 + reg_off, DramConfigMisc);
1701 Set_NB32(dev, 0xA8 + reg_off, DramConfigMisc2);
1702 Set_NB32(dev, 0x90 + reg_off, DramConfigLo);
1703 mct_SetDramConfigHi_D(pDCTstat, dct, DramConfigHi);
1704 mct_ForceAutoPrecharge_D(pDCTstat, dct);
1705 mct_EarlyArbEn_D(pMCTstat, pDCTstat);
1706 mctHookAfterAutoCfg();
1708 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1710 print_tx("AutoConfig: Status ", pDCTstat->Status);
1711 print_tx("AutoConfig: ErrStatus ", pDCTstat->ErrStatus);
1712 print_tx("AutoConfig: ErrCode ", pDCTstat->ErrCode);
1713 print_t("AutoConfig: Done\n");
1715 return pDCTstat->ErrCode;
1719 static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
1720 struct DCTStatStruc *pDCTstat, u8 dct)
1722 /* Set bank addressing, program Mask values and build a chip-select
1723 * population map. This routine programs PCI 0:24N:2x80 config register
1724 * and PCI 0:24N:2x60,64,68,6C config registers (CS Mask 0-3).
/* NOTE(review): interleaved lines (locals, braces, some statements) are
 * omitted from this view; comments describe only the visible code. */
1727 u8 ChipSel, Rows, Cols, Ranks ,Banks, DevWidth;
1728 u32 BankAddrReg, csMask;
1739 dev = pDCTstat->dev_dct;
1740 reg_off = 0x100 * dct;
/* One physical DIMM maps onto an even/odd chip-select pair. */
1743 for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel+=2) {
1745 if ((pDCTstat->Status & (1 << SB_64MuxedMode)) && ChipSel >=4)
1748 if (pDCTstat->DIMMValid & (1<<byte)) {
1749 smbaddr = Get_DIMMAddress_D(pDCTstat, (ChipSel + dct));
/* Pull device geometry (rows/cols/banks/width/ranks) from the SPD. */
1751 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1754 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
1757 Banks = mctRead_SPD(smbaddr, SPD_LBANKS);
1759 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH);
1760 DevWidth = byte & 0x7f; /* bits 0-6 = bank 0 width */
1762 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
1763 Ranks = (byte & 7) + 1;
1765 /* Configure Bank encoding
1766 * Use a 6-bit key into a lookup table.
1767 * Key (index) = CCCBRR, where CCC is the number of
1768 * Columns minus 9,RR is the number of Rows minus 13,
1769 * and B is the number of banks minus 2.
1770 * See "6-bit Bank Addressing Table" at the end of
1772 byte = Cols - 9; /* 9 Cols is smallest dev size */
1773 byte <<= 3; /* make room for row and bank bits*/
1777 /* 13 Rows is smallest dev size */
1778 byte |= Rows - 13; /* CCCBRR internal encode */
/* Translate the CCCBRR key into the hardware bank-address encoding. */
1780 for (dword=0; dword < 12; dword++) {
1781 if (byte == Tab_BankAddr[dword])
1787 /* bit no. of CS field in address mapping reg.*/
1788 dword <<= (ChipSel<<1);
1789 BankAddrReg |= dword;
1791 /* Mask value=(2pow(rows+cols+banks+3)-1)>>8,
1792 or 2pow(rows+cols+banks-5)-1*/
1795 byte = Rows + Cols; /* cl=rows+cols*/
1797 byte -= 2; /* 3 banks - 5 */
1799 byte -= 3; /* 2 banks - 5 */
1800 /* mask size (64-bit rank only) */
1802 if (pDCTstat->Status & (1 << SB_128bitmode))
1803 byte++; /* double mask size if in 128-bit mode*/
1805 csMask |= 1 << byte;
1808 /*set ChipSelect population indicator even bits*/
1809 pDCTstat->CSPresent |= (1<<ChipSel);
1811 /*set ChipSelect population indicator odd bits*/
1812 pDCTstat->CSPresent |= 1 << (ChipSel + 1);
1814 reg = 0x60+(ChipSel<<1) + reg_off; /*Dram CS Mask Register */
1816 val &= 0x1FF83FE0; /* Mask out reserved bits.*/
1817 Set_NB32(dev, reg, val);
/* SPD checksum failed earlier (see DIMMPresence_D): mark CS failed. */
1820 if (pDCTstat->DIMMSPDCSE & (1<<ChipSel))
1821 pDCTstat->CSTestFail |= (1<<ChipSel);
1823 } /* while ChipSel*/
1825 SetCSTriState(pMCTstat, pDCTstat, dct);
1826 /* SetCKETriState */
1827 SetODTTriState(pMCTstat, pDCTstat, dct);
1829 if (pDCTstat->Status & (1 << SB_128bitmode)) {
1830 SetCSTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
1831 SetODTTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
/* Fold the exclude map into CSTestFail: any CS removed by the platform
 * exclude list is treated like a failed CS. */
1833 word = pDCTstat->CSPresent;
1834 mctGetCS_ExcludeMap(); /* mask out specified chip-selects */
1835 word ^= pDCTstat->CSPresent;
1836 pDCTstat->CSTestFail |= word; /* enable ODT to disabled DIMMs */
1837 if (!pDCTstat->CSPresent)
1838 pDCTstat->ErrCode = SC_StopError;
1840 reg = 0x80 + reg_off; /* Bank Addressing Register */
1841 Set_NB32(dev, reg, BankAddrReg);
1843 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1845 print_tx("SPDSetBanks: Status ", pDCTstat->Status);
1846 print_tx("SPDSetBanks: ErrStatus ", pDCTstat->ErrStatus);
1847 print_tx("SPDSetBanks: ErrCode ", pDCTstat->ErrCode);
1848 print_t("SPDSetBanks: Done\n");
1852 static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
1853 struct DCTStatStruc *pDCTstat)
1855 /* Per SPDs, check the symmetry of DIMM pairs (DIMM on Channel A
1856 * matching with DIMM on Channel B), the overall DIMM population,
1857 * and determine the width mode: 64-bit, 64-bit muxed, 128-bit.
/* NOTE(review): the tail of this function (width-mode decision) falls on
 * lines not shown in this view; only the symmetry checks are visible. */
1861 u8 smbaddr, smbaddr1;
1864 /* Check Symmetry of Channel A and Channel B DIMMs
1865 (must be matched for 128-bit mode).*/
1866 for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
1867 if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
1868 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
1869 smbaddr1 = Get_DIMMAddress_D(pDCTstat, i+1);
/* Compare row size, column size, bank count, device width and rank
 * count of each A/B pair; any difference flags SB_DimmMismatchO. */
1871 byte = mctRead_SPD(smbaddr, SPD_ROWSZ) & 0x1f;
1872 byte1 = mctRead_SPD(smbaddr1, SPD_ROWSZ) & 0x1f;
1873 if (byte != byte1) {
1874 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1878 byte = mctRead_SPD(smbaddr, SPD_COLSZ) & 0x1f;
1879 byte1 = mctRead_SPD(smbaddr1, SPD_COLSZ) & 0x1f;
1880 if (byte != byte1) {
1881 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1885 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1886 byte1 = mctRead_SPD(smbaddr1, SPD_BANKSZ);
1887 if (byte != byte1) {
1888 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1892 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0x7f;
1893 byte1 = mctRead_SPD(smbaddr1, SPD_DEVWIDTH) & 0x7f;
1894 if (byte != byte1) {
1895 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1899 byte = mctRead_SPD(smbaddr, SPD_DMBANKS) & 7; /* #ranks-1 */
1900 byte1 = mctRead_SPD(smbaddr1, SPD_DMBANKS) & 7; /* #ranks-1 */
1901 if (byte != byte1) {
1902 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1912 static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
1913 struct DCTStatStruc *pDCTstat, u8 dct)
1915 /* Requires that Mask values for each bank be programmed first and that
1916 * the chip-select population indicator is correctly set.
/* Maps each enabled chip-select into the DCT's local address space,
 * largest bank first, by programming the DRAM CS Base registers
 * (F2x40..5C).  NOTE(review): interleaved lines (locals, braces, the
 * sparing/base-assignment details) are omitted from this view. */
1920 u32 nxtcsBase, curcsBase;
1922 u32 Sizeq, BiggestBank;
1932 dev = pDCTstat->dev_dct;
1933 reg_off = 0x100 * dct;
1937 /* CS Sparing 1=enabled, 0=disabled */
1938 if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
1939 if (MCT_DIMM_SPARE_NO_WARM) {
1940 /* Do no warm-reset DIMM spare */
1941 if (pMCTstat->GStatus & 1 << GSB_EnDIMMSpareNW) {
1942 word = pDCTstat->CSPresent;
1946 /* Make sure at least two chip-selects are available */
1949 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1952 if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
1953 word = pDCTstat->CSPresent;
1955 word &= ~(1 << val);
1957 /* Make sure at least two chip-selects are available */
1960 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
/* Greedy placement: repeatedly pick the biggest not-yet-enabled bank
 * and assign it the next free base address. */
1965 nxtcsBase = 0; /* Next available cs base ADDR[39:8] */
1966 for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
1968 for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
1969 if (pDCTstat->CSPresent & (1 << q)) { /* bank present? */
1970 reg = 0x40 + (q << 2) + reg_off; /* Base[q] reg.*/
1971 val = Get_NB32(dev, reg);
1972 if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
1973 reg = 0x60 + (q << 1) + reg_off; /*Mask[q] reg.*/
1974 val = Get_NB32(dev, reg);
1978 Sizeq = val; //never used
1979 if (val > BiggestBank) {
1980 /*Bingo! possibly Map this chip-select next! */
1985 } /*if bank present */
1987 if (BiggestBank !=0) {
1988 curcsBase = nxtcsBase; /* curcsBase=nxtcsBase*/
1989 /* DRAM CS Base b Address Register offset */
1990 reg = 0x40 + (b << 2) + reg_off;
1993 val = 1 << Spare; /* Spare Enable*/
1996 val |= 1 << CSEnable; /* Bank Enable */
1998 Set_NB32(dev, reg, val);
2002 /* let nxtcsBase+=Size[b] */
2003 nxtcsBase += BiggestBank;
2006 /* bank present but disabled?*/
2007 if ( pDCTstat->CSTestFail & (1 << p)) {
2008 /* DRAM CS Base b Address Register offset */
2009 reg = (p << 2) + 0x40 + reg_off;
2010 val = 1 << TestFail;
2011 Set_NB32(dev, reg, val);
/* DCTSysLimit = highest mapped address; wraps to ~0 if nothing mapped. */
2016 pDCTstat->DCTSysLimit = nxtcsBase - 1;
2017 mct_AfterStitchMemory(pMCTstat, pDCTstat, dct);
2020 // dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
2022 print_tx("StitchMemory: Status ", pDCTstat->Status);
2023 print_tx("StitchMemory: ErrStatus ", pDCTstat->ErrStatus);
2024 print_tx("StitchMemory: ErrCode ", pDCTstat->ErrCode);
2025 print_t("StitchMemory: Done\n");
2029 static u8 Get_Tk_D(u8 k)
2031 return Table_T_k[k];
2035 static u8 Get_CLj_D(u8 j)
2037 return Table_CL2_j[j];
2040 static u8 Get_DefTrc_k_D(u8 k)
2042 return Tab_defTrc_k[k];
2046 static u16 Get_40Tk_D(u8 k)
2048 return Tab_40T_k[k]; /* FIXME: k or k<<1 ?*/
2052 static u16 Get_Fk_D(u8 k)
2054 return Table_F_k[k]; /* FIXME: k or k<<1 ? */
/* Decide whether DIMM i supports CAS-latency index j at cycle-time index
 * k, by decoding the SPD CAS-latency bitmap and the corresponding cycle
 * time from SPD bytes 9/23/25.  Returns 0 when the DIMM is capable
 * (note the inverted sense).  NOTE(review): the remainder of the
 * parameter list (i, j, k), locals, and several statements fall on lines
 * not shown in this view. */
2058 static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat,
2068 DIMMi = Get_DIMMAddress_D(pDCTstat, i);
2072 /* check if DIMMi supports CLj */
2073 CL_i = mctRead_SPD(DIMMi, SPD_CASLAT);
2076 /*find out if its CL X, CLX-1, or CLX-2 */
2077 word = bsr(byte); /* bit position of CLj */
2078 wordx = bsr(CL_i); /* bit position of CLX of CLi */
2079 wordx -= word; /* CL number (CL no. = 0,1, 2, or 3) */
2080 wordx <<= 3; /* 8 bits per SPD byte index */
2081 /*get T from SPD byte 9, 23, 25*/
2082 word = (EncodedTSPD >> wordx) & 0xFF;
/* word now holds the SPD byte number that carries the cycle time for
 * this CL derating; read it and compare against the proposed speed. */
2084 byte = mctRead_SPD(DIMMi, word); /* DIMMi speed */
2087 } else if (byte == 0){
/* A zero cycle-time byte is an SPD defect: record it. */
2088 pDCTstat->ErrStatus |= 1<<SB_NoCycTime;
2091 ret = 0; /* DIMM is capable! */
2100 static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
2101 struct DCTStatStruc *pDCTstat)
2103 /* Check DIMMs present, verify checksum, flag SDRAM type,
2104 * build population indicator bitmaps, and preload bus loading
2105 * of DIMMs into DCTStatStruc.
2106 * MAAload=number of devices on the "A" bus.
2107 * MABload=number of devices on the "B" bus.
2108 * MAAdimms=number of DIMMs on the "A" bus slots.
2109 * MABdimms=number of DIMMs on the "B" bus slots.
2110 * DATAAload=number of ranks on the "A" bus slots.
2111 * DATABload=number of ranks on the "B" bus slots.
/* Returns pDCTstat->ErrCode.  NOTE(review): interleaved lines (locals,
 * braces, checksum accumulation, rank decoding) are omitted from this
 * view; comments describe only the visible code. */
2118 u16 RegDIMMPresent, MaxDimms;
2124 /* preload data structure with addrs */
2125 mctGet_DIMMAddr(pDCTstat, pDCTstat->Node_ID);
2127 DimmSlots = MaxDimms = mctGet_NVbits(NV_MAX_DIMMS);
2129 SPDCtrl = mctGet_NVbits(NV_SPDCHK_RESTRT);
2132 pDCTstat->DimmQRPresent = 0;
2134 for (i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
/* Visit a slot either because it physically exists (i < DimmSlots) or
 * because a quad-rank DIMM claimed this logical position on pass 1. */
2138 if ((pDCTstat->DimmQRPresent & (1 << i)) || (i < DimmSlots)) {
2139 print_tx("\t DIMMPresence: i=", i);
2140 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
2141 print_tx("\t DIMMPresence: smbaddr=", smbaddr);
/* Read and checksum the first 64 SPD bytes. */
2144 for (Index=0; Index < 64; Index++){
2146 status = mctRead_SPD(smbaddr, Index);
2149 byte = status & 0xFF;
2155 pDCTstat->DIMMPresent |= 1 << i;
2156 if ((Checksum & 0xFF) == byte) {
2157 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2158 if (byte == JED_DDR2SDRAM) {
2159 /*Dimm is 'Present'*/
2160 pDCTstat->DIMMValid |= 1 << i;
/* NOTE(review): '=' overwrites any previously recorded checksum
 * failure bits; SPDSetBanks_D reads DIMMSPDCSE as a bitmask, so this
 * likely should be '|='.  Confirm against upstream source. */
2163 pDCTstat->DIMMSPDCSE = 1 << i;
2165 pDCTstat->ErrStatus |= 1 << SB_DIMMChkSum;
2166 pDCTstat->ErrCode = SC_StopError;
2168 /*if NV_SPDCHK_RESTRT is set to 1, ignore faulty SPD checksum*/
2169 pDCTstat->ErrStatus |= 1<<SB_DIMMChkSum;
2170 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2171 if (byte == JED_DDR2SDRAM)
2172 pDCTstat->DIMMValid |= 1 << i;
2175 /* Check module type */
2176 byte = mctRead_SPD(smbaddr, SPD_DIMMTYPE);
2177 if (byte & JED_REGADCMSK)
2178 RegDIMMPresent |= 1 << i;
2179 /* Check ECC capable */
2180 byte = mctRead_SPD(smbaddr, SPD_EDCTYPE);
2181 if (byte & JED_ECC) {
2182 /* DIMM is ECC capable */
2183 pDCTstat->DimmECCPresent |= 1 << i;
/* NOTE(review): comment below says "ECC capable" but the bit tested is
 * address/command parity — the comment appears copy-pasted. */
2185 if (byte & JED_ADRCPAR) {
2186 /* DIMM is ECC capable */
2187 pDCTstat->DimmPARPresent |= 1 << i;
2189 /* Check if x4 device */
2190 devwidth = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;
2191 if (devwidth == 4) {
2192 /* DIMM is made with x4 or x16 drams */
2193 pDCTstat->Dimmx4Present |= 1 << i;
2194 } else if (devwidth == 8) {
2195 pDCTstat->Dimmx8Present |= 1 << i;
2196 } else if (devwidth == 16) {
2197 pDCTstat->Dimmx16Present |= 1 << i;
2199 /* check page size */
2200 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
2204 word *= devwidth; /* (((2^COLBITS) / 8) * ORG) / 2048 */
2207 pDCTstat->DIMM2Kpage |= 1 << i;
2209 /*Check if SPD diag bit 'analysis probe installed' is set */
2210 byte = mctRead_SPD(smbaddr, SPD_ATTRIB);
2211 if ( byte & JED_PROBEMSK )
2212 pDCTstat->Status |= 1<<SB_DiagClks;
2214 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
2215 if (!(byte & (1<< SPDPLBit)))
2216 pDCTstat->DimmPlPresent |= 1 << i;
2220 /* if any DIMMs are QR, we have to make two passes through DIMMs*/
2221 if ( pDCTstat->DimmQRPresent == 0) {
2224 if (i < DimmSlots) {
2225 pDCTstat->DimmQRPresent |= (1 << i) | (1 << (i+4));
2227 byte = 2; /* upper two ranks of QR DIMM will be counted on another DIMM number iteration*/
2228 } else if (byte == 2) {
2229 pDCTstat->DimmDRPresent |= 1 << i;
/* Accumulate per-channel bus loading (j selects channel A/B; the rank
 * decoding that sets byte/bytex is on lines not shown). */
2234 else if (devwidth == 4)
2238 bytex <<= 1; /*double Addr bus load value for dual rank DIMMs*/
2241 pDCTstat->DATAload[j] += byte; /*number of ranks on DATA bus*/
2242 pDCTstat->MAload[j] += bytex; /*number of devices on CMD/ADDR bus*/
2243 pDCTstat->MAdimms[j]++; /*number of DIMMs on A bus */
2244 /*check for DRAM package Year <= 06*/
2245 byte = mctRead_SPD(smbaddr, SPD_MANDATEYR);
2246 if (byte < MYEAR06) {
2247 /*Year < 06 and hence Week < 24 of 06 */
2248 pDCTstat->DimmYr06 |= 1 << i;
2249 pDCTstat->DimmWk2406 |= 1 << i;
2250 } else if (byte == MYEAR06) {
2251 /*Year = 06, check if Week <= 24 */
2252 pDCTstat->DimmYr06 |= 1 << i;
2253 byte = mctRead_SPD(smbaddr, SPD_MANDATEWK);
2254 if (byte <= MWEEK24)
2255 pDCTstat->DimmWk2406 |= 1 << i;
2261 print_tx("\t DIMMPresence: DIMMValid=", pDCTstat->DIMMValid);
2262 print_tx("\t DIMMPresence: DIMMPresent=", pDCTstat->DIMMPresent);
2263 print_tx("\t DIMMPresence: RegDIMMPresent=", RegDIMMPresent);
2264 print_tx("\t DIMMPresence: DimmECCPresent=", pDCTstat->DimmECCPresent);
2265 print_tx("\t DIMMPresence: DimmPARPresent=", pDCTstat->DimmPARPresent);
2266 print_tx("\t DIMMPresence: Dimmx4Present=", pDCTstat->Dimmx4Present);
2267 print_tx("\t DIMMPresence: Dimmx8Present=", pDCTstat->Dimmx8Present);
2268 print_tx("\t DIMMPresence: Dimmx16Present=", pDCTstat->Dimmx16Present);
2269 print_tx("\t DIMMPresence: DimmPlPresent=", pDCTstat->DimmPlPresent);
2270 print_tx("\t DIMMPresence: DimmDRPresent=", pDCTstat->DimmDRPresent);
2271 print_tx("\t DIMMPresence: DimmQRPresent=", pDCTstat->DimmQRPresent);
2272 print_tx("\t DIMMPresence: DATAload[0]=", pDCTstat->DATAload[0]);
2273 print_tx("\t DIMMPresence: MAload[0]=", pDCTstat->MAload[0]);
2274 print_tx("\t DIMMPresence: MAdimms[0]=", pDCTstat->MAdimms[0]);
2275 print_tx("\t DIMMPresence: DATAload[1]=", pDCTstat->DATAload[1]);
2276 print_tx("\t DIMMPresence: MAload[1]=", pDCTstat->MAload[1]);
2277 print_tx("\t DIMMPresence: MAdimms[1]=", pDCTstat->MAdimms[1]);
/* Post-scan consistency: registered/unbuffered must not be mixed; set
 * summary status bits when ALL valid DIMMs share a capability. */
2279 if (pDCTstat->DIMMValid != 0) { /* If any DIMMs are present...*/
2280 if (RegDIMMPresent != 0) {
2281 if ((RegDIMMPresent ^ pDCTstat->DIMMValid) !=0) {
2282 /* module type DIMM mismatch (reg'ed, unbuffered) */
2283 pDCTstat->ErrStatus |= 1<<SB_DimmMismatchM;
2284 pDCTstat->ErrCode = SC_StopError;
2286 /* all DIMMs are registered */
2287 pDCTstat->Status |= 1<<SB_Registered;
2290 if (pDCTstat->DimmECCPresent != 0) {
2291 if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
2292 /* all DIMMs are ECC capable */
2293 pDCTstat->Status |= 1<<SB_ECCDIMMs;
2296 if (pDCTstat->DimmPARPresent != 0) {
2297 if ((pDCTstat->DimmPARPresent ^ pDCTstat->DIMMValid) == 0) {
2298 /*all DIMMs are Parity capable */
2299 pDCTstat->Status |= 1<<SB_PARDIMMs;
2303 /* no DIMMs present or no DIMMs that qualified. */
2304 pDCTstat->ErrStatus |= 1<<SB_NoDimms;
2305 pDCTstat->ErrCode = SC_StopError;
2308 print_tx("\t DIMMPresence: Status ", pDCTstat->Status);
2309 print_tx("\t DIMMPresence: ErrStatus ", pDCTstat->ErrStatus);
2310 print_tx("\t DIMMPresence: ErrCode ", pDCTstat->ErrCode);
2311 print_t("\t DIMMPresence: Done\n");
2313 mctHookAfterDIMMpre();
2315 return pDCTstat->ErrCode;
2319 static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
2320 struct DCTStatStruc *pDCTstat, int j, int k)
2322 /* Determine if system is capable of operating at given input
2323 * parameters for CL, and T. There are three components to
2324 * determining "maximum frequency" in AUTO mode: SPD component,
2325 * Bus load component, and "Preset" max frequency component.
2326 * This procedure is used to help find the SPD component and relies
2327 * on pre-determination of the bus load component and the Preset
2328 * components. The generalized algorithm for finding maximum
2329 * frequency is structured this way so as to optimize for CAS
2330 * latency (which might get better as a result of reduced frequency).
2331 * See "Global relationship between index values and item values"
2332 * for definition of CAS latency index (j) and Frequency index (k).
/* Reject frequencies above the preset maximum computed earlier by
 * GetPresetmaxF_D.  NOTE(review): the return statements for both the
 * pass and fail paths fall on lines not shown in this view. */
2337 if (Get_Fk_D(k) > pDCTstat->PresetmaxFreq)
2342 /* compare proposed CAS latency with AMD Si capabilities */
2343 if ((j < J_MIN) || (j > J_MAX))
/* Return the SMBus address for DIMM slot i, taken from the DIMMAddr
 * table preloaded by mctGet_DIMMAddr().  NOTE(review): the indexing and
 * return statement fall on lines not shown in this view. */
2355 static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i)
2359 p = pDCTstat->DIMMAddr;
2360 //mct_BeforeGetDIMMAddress();
2365 static void mct_initDCT(struct MCTStatStruc *pMCTstat,
2366 struct DCTStatStruc *pDCTstat)
/* Initialize DCT0, then — unless a fatal error occurred — initialize
 * DCT1 when running unganged with DIMMs present on channel B, otherwise
 * disable DCT1's DRAM interface.  NOTE(review): braces/else structure
 * between the visible lines are omitted from this view. */
2371 /* Config. DCT0 for Ganged or unganged mode */
2372 print_t("\tmct_initDCT: DCTInit_D 0\n");
2373 DCTInit_D(pMCTstat, pDCTstat, 0);
2374 if (pDCTstat->ErrCode == SC_FatalErr) {
2375 // Do nothing goto exitDCTInit; /* any fatal errors? */
2377 /* Configure DCT1 if unganged and enabled*/
2378 if (!pDCTstat->GangedMode) {
2379 if ( pDCTstat->DIMMValidDCT[1] > 0) {
2380 print_t("\tmct_initDCT: DCTInit_D 1\n");
/* Preserve DCT0's error code across the DCT1 init so a benign "DCT1 not
 * running" result does not mask a DCT0 error. */
2381 err_code = pDCTstat->ErrCode; /* save DCT0 errors */
2382 pDCTstat->ErrCode = 0;
2383 DCTInit_D(pMCTstat, pDCTstat, 1);
2384 if (pDCTstat->ErrCode == 2) /* DCT1 is not Running */
2385 pDCTstat->ErrCode = err_code; /* Using DCT0 Error code to update pDCTstat.ErrCode */
/* No DIMMs on DCT1: disable its DRAM interface (DCT1 Dram Config Hi at
 * offset 0x100 + 0x94). */
2387 val = 1 << DisDramInterface;
2388 Set_NB32(pDCTstat->dev_dct, 0x100 + 0x94, val);
2396 static void mct_DramInit(struct MCTStatStruc *pMCTstat,
2397 struct DCTStatStruc *pDCTstat, u8 dct)
/* Run hardware DRAM initialization for channel 'dct', wrapped in the
 * Erratum 278 workaround: on affected (pre-B2) ganged parts, auto
 * refresh is disabled across init and re-enabled afterwards so both
 * DCTs stay in sync. */
2401 mct_BeforeDramInit_Prod_D(pMCTstat, pDCTstat);
2402 // FIXME: for rev A: mct_BeforeDramInit_D(pDCTstat, dct);
2404 /* Disable auto refresh before Dram init when in ganged mode (Erratum 278) */
2405 if (pDCTstat->LogicalCPUID & AMD_DR_LT_B2) {
2406 if (pDCTstat->GangedMode) {
2407 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2408 val |= 1 << DisAutoRefresh;
2409 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2413 mct_DramInit_Hw_D(pMCTstat, pDCTstat, dct);
2415 /* Re-enable auto refresh after Dram init when in ganged mode
2416 * to ensure both DCTs are in sync (Erratum 278)
2419 if (pDCTstat->LogicalCPUID & AMD_DR_LT_B2) {
2420 if (pDCTstat->GangedMode) {
/* Poll Dram Config Lo (F2x90) until InitDram clears/completes. */
2422 val = Get_NB32(pDCTstat->dev_dct, 0x90 + (0x100 * dct));
2423 } while (!(val & (1 << InitDram)));
2427 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
/* NOTE(review): the clear/set/clear toggle of DisAutoRefresh below only
 * has effect if each step is committed with a Set_NB32 — those writes
 * appear to be on lines not shown in this view; as written, only the
 * final state matters.  Confirm against the original source. */
2428 val &= ~(1 << DisAutoRefresh);
2429 val |= 1 << DisAutoRefresh;
2430 val &= ~(1 << DisAutoRefresh);
2436 static u8 mct_setMode(struct MCTStatStruc *pMCTstat,
2437 struct DCTStatStruc *pDCTstat)
/* Split the DIMM population bitmap into per-channel maps (even bits =
 * channel A / DCT0, odd bits = channel B / DCT1) and, when the two
 * channels match and unganged mode is not forced, enable ganged
 * (128-bit) operation via DctGangEn.  Returns pDCTstat->ErrCode. */
2444 byte = bytex = pDCTstat->DIMMValid;
2445 bytex &= 0x55; /* CHA DIMM pop */
2446 pDCTstat->DIMMValidDCT[0] = bytex;
/* NOTE(review): a shift aligning the odd-bit channel-B map with the
 * channel-A map appears to be on a line not shown in this view. */
2448 byte &= 0xAA; /* CHB DIMM popa */
2450 pDCTstat->DIMMValidDCT[1] = byte;
2452 if (byte != bytex) {
2453 pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
/* Honor a user request for unganged operation by reporting a mismatch. */
2455 if ( mctGet_NVbits(NV_Unganged) )
2456 pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
2458 if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
2459 pDCTstat->GangedMode = 1;
2460 /* valid 128-bit mode population. */
2461 pDCTstat->Status |= 1 << SB_128bitmode;
/* Set DctGangEn in the DRAM Controller Select Low register (reg is
 * assigned on a line not shown in this view). */
2463 val = Get_NB32(pDCTstat->dev_dct, reg);
2464 val |= 1 << DctGangEn;
2465 Set_NB32(pDCTstat->dev_dct, reg, val);
2466 print_tx("setMode: DRAM Controller Select Low Register = ", val);
2469 return pDCTstat->ErrCode;
/* Read a 32-bit northbridge register via PCI config space. */
2473 u32 Get_NB32(u32 dev, u32 reg)
2475 return pci_read_config32(dev, reg);
/* Write a 32-bit northbridge register via PCI config space. */
2479 void Set_NB32(u32 dev, u32 reg, u32 val)
2481 pci_write_config32(dev, reg, val);
/* Indexed NB register read: write 'index' to the index register, then
 * read the data port at index_reg+4. No completion polling. */
2485 u32 Get_NB32_index(u32 dev, u32 index_reg, u32 index)
2489 Set_NB32(dev, index_reg, index);
2490 dword = Get_NB32(dev, index_reg+0x4);
/* Indexed NB register write: select 'index', then write 'data' to the
 * data port at index_reg+4. No completion polling. */
2495 void Set_NB32_index(u32 dev, u32 index_reg, u32 index, u32 data)
2497 Set_NB32(dev, index_reg, index);
2498 Set_NB32(dev, index_reg + 0x4, data);
/* Indexed NB read with handshake: issue a read access (DctAccessWrite
 * cleared), poll DctAccessDone, then fetch the data port.
 * NOTE(review): the do-opener of the poll loop is elided in this excerpt. */
2502 u32 Get_NB32_index_wait(u32 dev, u32 index_reg, u32 index)
2508 index &= ~(1 << DctAccessWrite);
2509 Set_NB32(dev, index_reg, index);
2511 dword = Get_NB32(dev, index_reg);
2512 } while (!(dword & (1 << DctAccessDone)));
2513 dword = Get_NB32(dev, index_reg + 0x4);
/* Indexed NB write with handshake: stage 'data' in the data port, issue
 * the write (DctAccessWrite set), then poll DctAccessDone. */
2519 void Set_NB32_index_wait(u32 dev, u32 index_reg, u32 index, u32 data)
2524 Set_NB32(dev, index_reg + 0x4, data);
2525 index |= (1 << DctAccessWrite);
2526 Set_NB32(dev, index_reg, index);
2528 dword = Get_NB32(dev, index_reg);
2529 } while (!(dword & (1 << DctAccessDone)));
/* Program platform-specific drive-strength/timing values (obtained earlier
 * from the interface layer) into the DCT's indexed registers.
 * Returns pDCTstat->ErrCode.
 * NOTE(review): the i_start/i_end setup is elided here; presumably ganged
 * mode programs both channels from channel A's values via SyncSetting(). */
2534 static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
2535 struct DCTStatStruc *pDCTstat, u8 dct)
2537 /* Get platform specific config/timing values from the interface layer
2538 * and program them into DCT.
2541 u32 dev = pDCTstat->dev_dct;
2543 u8 i, i_start, i_end;
2545 if (pDCTstat->GangedMode) {
2546 SyncSetting(pDCTstat);
2553 for (i=i_start; i<i_end; i++) {
2554 index_reg = 0x98 + (i * 0x100);
2555 Set_NB32_index_wait(dev, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]); /* Channel Output Driver Compensation Control */
2556 Set_NB32_index_wait(dev, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]); /* Channel Address Timing Control (original comment was a copy-paste of the ODC line) */
2559 return pDCTstat->ErrCode;
/* Block until this node's memory is up: if the node is present and has any
 * DIMMs on either DCT, poll F2x110[DramEnabled] until set.
 * NOTE(review): the do-opener of the poll loop is elided in this excerpt. */
2564 static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat)
2569 if (pDCTstat->NodePresent) {
2570 print_tx("mct_SyncDCTsReady: Node ", pDCTstat->Node_ID);
2571 dev = pDCTstat->dev_dct;
2573 if ((pDCTstat->DIMMValidDCT[0] ) || (pDCTstat->DIMMValidDCT[1])) { /* This Node has dram */
2575 val = Get_NB32(dev, 0x110);
2576 } while (!(val & (1 << DramEnabled)));
2577 print_t("mct_SyncDCTsReady: DramEnabled\n");
2579 } /* Node is present */
/* After CAS-latency/timing discovery: load per-DCT DIMM population into
 * DIMMValid and raise SC_StopError if the selected DCT has no DIMMs.
 * NOTE(review): the if/else boundary between the two near-identical paths
 * (unganged vs the other branch, which also clears CSPresent/CSTestFail)
 * is partially elided in this excerpt. */
2583 static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
2584 struct DCTStatStruc *pDCTstat, u8 dct)
2586 if (!pDCTstat->GangedMode) {
2588 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2589 if (pDCTstat->DIMMValidDCT[dct] == 0)
2590 pDCTstat->ErrCode = SC_StopError;
2592 pDCTstat->CSPresent = 0;
2593 pDCTstat->CSTestFail = 0;
2594 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2595 if (pDCTstat->DIMMValidDCT[dct] == 0)
2596 pDCTstat->ErrCode = SC_StopError;
/* Compute data-bus width from SPD (SPDCalcWidth_D) and then set the
 * ganged/unganged mode (mct_setMode). Returns pDCTstat->ErrCode. */
2601 static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
2602 struct DCTStatStruc *pDCTstat, u8 dct)
2607 SPDCalcWidth_D(pMCTstat, pDCTstat);
2608 ret = mct_setMode(pMCTstat, pDCTstat);
2610 ret = pDCTstat->ErrCode;
2613 print_tx("SPDCalcWidth: Status ", pDCTstat->Status);
2614 print_tx("SPDCalcWidth: ErrStatus ", pDCTstat->ErrStatus);
2615 print_tx("SPDCalcWidth: ErrCode ", pDCTstat->ErrCode);
2616 print_t("SPDCalcWidth: Done\n");
/* After the DCT address maps are stitched together: compute the software
 * memory-hole base, and in unganged mode program F2x110 (DRAM Controller
 * Select Low) / F2x114 so accesses are routed between DCT0 and DCT1 —
 * either splitting at the top of DCT0 (both DCTs populated) or sending
 * everything to DCT1 (DCT0 empty). Updates NodeSysLimit.
 * NOTE(review): several register/value computations (reg assignments,
 * shifts into dword) are elided in this excerpt; hole-base math below is
 * documented only as far as the visible lines show. */
2622 static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
2623 struct DCTStatStruc *pDCTstat, u8 dct)
2632 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
2633 DramHoleBase = mctGet_NVbits(NV_BottomIO);
2635 /* Increase hole size so;[31:24]to[31:16]
2636 * it has granularity of 128MB shl eax,8
2637 * Set 'effective' bottom IOmov DramHoleBase,eax
2639 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2641 /* In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
2642 if (!pDCTstat->GangedMode) {
2643 dev = pDCTstat->dev_dct;
2644 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2645 /* if DCT0 and DCT1 both exist, set DctSelBaseAddr[47:27] to the top of DCT0 */
2647 if (pDCTstat->DIMMValidDCT[1] > 0) {
2648 dword = pDCTstat->DCTSysLimit + 1;
2649 dword += pDCTstat->NodeSysBase;
2650 dword >>= 8; /* scale [39:8] to [47:27],and to F2x110[31:11] */
2651 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2652 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2653 val = pMCTstat->HoleBase;
2655 val = (((~val) & 0xFF) + 1);
2660 val = Get_NB32(dev, reg);
2663 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2664 Set_NB32(dev, reg, val);
2665 print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select Low Register = ", val);
2666 print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select High Register = ", dword);
2670 Set_NB32(dev, reg, val);
2673 /* Program the DctSelBaseAddr value to 0
2674 if DCT 0 is disabled */
2675 if (pDCTstat->DIMMValidDCT[0] == 0) {
2676 dword = pDCTstat->NodeSysBase;
2678 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2679 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2680 val = pMCTstat->HoleBase;
2683 val |= (((~val) & 0xFFFF) + 1);
2688 Set_NB32(dev, reg, val);
2691 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2692 Set_NB32(dev, reg, val);
2693 print_tx("AfterStitch DCT1 only: DRAM Controller Select Low Register = ", val);
2694 print_tx("AfterStitch DCT1 only: DRAM Controller Select High Register = ", dword);
2698 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2700 print_tx("AfterStitch pDCTstat->NodeSysBase = ", pDCTstat->NodeSysBase);
2701 print_tx("mct_AfterStitchMemory: pDCTstat->NodeSysLimit ", pDCTstat->NodeSysLimit);
/* Thin wrapper: detect populated DIMMs via DIMMPresence_D and return
 * pDCTstat->ErrCode (the 'dct' argument is unused in the visible lines). */
2705 static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
2706 struct DCTStatStruc *pDCTstat, u8 dct)
2711 ret = DIMMPresence_D(pMCTstat, pDCTstat);
2713 ret = pDCTstat->ErrCode;
2719 /* mct_BeforeGetDIMMAddress inline in C */
/* For every present node, program the secondary ("other") bus-turnaround
 * timings on DCT0, and on DCT1 as well when unganged and populated. */
2722 static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
2723 struct DCTStatStruc *pDCTstatA)
2727 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
2728 struct DCTStatStruc *pDCTstat;
2729 pDCTstat = pDCTstatA + Node;
2730 if (pDCTstat->NodePresent) {
2731 if (pDCTstat->DIMMValidDCT[0]) {
2732 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
2733 Set_OtherTiming(pMCTstat, pDCTstat, 0);
2735 if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
2736 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
2737 Set_OtherTiming(pMCTstat, pDCTstat, 1);
2739 } /* Node is present*/
/* Compute the five turnaround timings (Trdrd, Twrwr, Twrrd, TrwtTO,
 * TrwtWB) for this DCT and merge them into the Dram Timing High register
 * (F2x[1,0]8C). The mask/shift lines merging each 'dword' into 'val' are
 * elided in this excerpt. */
2744 static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
2745 struct DCTStatStruc *pDCTstat, u8 dct)
2748 u32 reg_off = 0x100 * dct;
2751 u32 dev = pDCTstat->dev_dct;
2753 Get_Trdrd(pMCTstat, pDCTstat, dct);
2754 Get_Twrwr(pMCTstat, pDCTstat, dct);
2755 Get_Twrrd(pMCTstat, pDCTstat, dct);
2756 Get_TrwtTO(pMCTstat, pDCTstat, dct);
2757 Get_TrwtWB(pMCTstat, pDCTstat);
2759 reg = 0x8C + reg_off; /* Dram Timing Hi */
2760 val = Get_NB32(dev, reg);
2762 dword = pDCTstat->TrwtTO; //0x07
2764 dword = pDCTstat->Twrrd; //0x03
2766 dword = pDCTstat->Twrwr; //0x03
2768 dword = pDCTstat->Trdrd; //0x03
2770 dword = pDCTstat->TrwtWB; //0x07
2772 val = OtherTiming_A_D(pDCTstat, val);
2773 Set_NB32(dev, reg, val);
/* Derive Trdrd (read-to-read turnaround) for this DCT and store it in
 * pDCTstat->Trdrd. Mixed x4/x8 DIMMs use the DqsRcvEn gross-delay CGDD;
 * otherwise Trdrd depends on whether RdDqsTime matches across DIMMs and
 * whether any DqsRcvEn pair differs by >= half a MEMCLK.
 * NOTE(review): the decision/assignment lines between the register reads
 * and the final store are elided in this excerpt. */
2778 static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
2779 struct DCTStatStruc *pDCTstat, u8 dct)
2785 u32 index_reg = 0x98 + 0x100 * dct;
2786 u32 dev = pDCTstat->dev_dct;
2788 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0)) {
2789 /* mixed (x4 or x8) DIMM types
2790 the largest DqsRcvEnGrossDelay of any DIMM minus the DqsRcvEnGrossDelay
2791 of any other DIMM is equal to the Critical Gross Delay Difference (CGDD) for Trdrd.*/
2792 byte = Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2800 Trdrd with non-mixed DIMM types
2801 RdDqsTime are the same for all DIMMs and DqsRcvEn difference between
2802 any two DIMMs is less than half of a MEMCLK, BIOS should program Trdrd to 0000b,
2803 else BIOS should program Trdrd to 0001b.
2805 RdDqsTime are the same for all DIMMs
2806 DDR400~DDR667 only use one set register
2807 DDR800 have two set register for DIMM0 and DIMM1 */
2809 if (pDCTstat->Speed > 3) {
2810 /* DIMM0+DIMM1 exist */ //NOTE it should be 5
2811 val = bsf(pDCTstat->DIMMValid);
2812 dword = bsr(pDCTstat->DIMMValid);
2813 if (dword != val && dword != 0) {
2814 /* DCT Read DQS Timing Control - DIMM0 - Low */
2815 dword = Get_NB32_index_wait(dev, index_reg, 0x05);
2816 /* DCT Read DQS Timing Control - DIMM1 - Low */
2817 val = Get_NB32_index_wait(dev, index_reg, 0x105);
2821 /* DCT Read DQS Timing Control - DIMM0 - High */
2822 dword = Get_NB32_index_wait(dev, index_reg, 0x06);
2823 /* DCT Read DQS Timing Control - DIMM1 - High */
2824 val = Get_NB32_index_wait(dev, index_reg, 0x106);
2830 /* DqsRcvEn difference between any two DIMMs is
2831 less than half of a MEMCLK */
2832 /* DqsRcvEn byte 1,0*/
2833 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x10))
2835 /* DqsRcvEn byte 3,2*/
2836 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x11))
2838 /* DqsRcvEn byte 5,4*/
2839 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x20))
2841 /* DqsRcvEn byte 7,6*/
2842 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x21))
 /* ECC check byte — index 0x12 (presumably gated on DimmECCPresent
  * in the elided lines; confirm against the full source). */
2845 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x12))
2851 pDCTstat->Trdrd = Trdrd;
/* Derive Twrwr (write-to-write turnaround) for this DCT and store it in
 * pDCTstat->Twrwr. At DDR800+ with two DIMMs, the CGDD is the spread of
 * WrDatGrossDlyByte across DIMMs; the mapping of that value to the final
 * Twrwr code is in the elided lines. */
2856 static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
2857 struct DCTStatStruc *pDCTstat, u8 dct)
2860 u32 index_reg = 0x98 + 0x100 * dct;
2861 u32 dev = pDCTstat->dev_dct;
2865 /* WrDatGrossDlyByte only use one set register when DDR400~DDR667
2866 DDR800 have two set register for DIMM0 and DIMM1 */
2867 if (pDCTstat->Speed > 3) {
2868 val = bsf(pDCTstat->DIMMValid);
2869 dword = bsr(pDCTstat->DIMMValid);
2870 if (dword != val && dword != 0) {
2871 /*the largest WrDatGrossDlyByte of any DIMM minus the
2872 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
2873 val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2880 pDCTstat->Twrwr = Twrwr;
/* Derive Twrrd (write-to-read turnaround) from the spread between the
 * largest WrDatGrossDlyByte and the DqsRcvEn gross delays (CGDD) and
 * store it in pDCTstat->Twrrd. The arithmetic combining 'byte' and
 * 'bytex' into the final code is in the elided lines. */
2884 static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
2885 struct DCTStatStruc *pDCTstat, u8 dct)
2887 u8 byte, bytex, val;
2888 u32 index_reg = 0x98 + 0x100 * dct;
2889 u32 dev = pDCTstat->dev_dct;
2891 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2892 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2893 equal to the Critical Gross Delay Difference (CGDD) for Twrrd.*/
2895 /* WrDatGrossDlyByte only use one set register when DDR400~DDR667
2896 DDR800 have two set register for DIMM0 and DIMM1 */
2897 if (pDCTstat->Speed > 3) {
2898 val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2900 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 1); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
2901 pDCTstat->WrDatGrossH = (u8) val; /* low byte = max value */
2904 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2906 bytex = pDCTstat->DqsRcvEnGrossL;
2907 byte = pDCTstat->WrDatGrossH;
2917 pDCTstat->Twrrd = bytex;
/* Derive TrwtTO (read-to-write turnaround) from the DqsRcvEn and
 * WrDatGross gross-delay maxima (side effects of the two helper calls
 * populate DqsRcvEnGrossL / WrDatGrossH) and store it in
 * pDCTstat->TrwtTO. The adjustment bodies of the range checks below are
 * in the elided lines. */
2921 static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
2922 struct DCTStatStruc *pDCTstat, u8 dct)
2925 u32 index_reg = 0x98 + 0x100 * dct;
2926 u32 dev = pDCTstat->dev_dct;
2928 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2929 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2930 equal to the Critical Gross Delay Difference (CGDD) for TrwtTO. */
2931 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2932 Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2933 bytex = pDCTstat->DqsRcvEnGrossL;
2934 byte = pDCTstat->WrDatGrossH;
2937 if ((bytex == 1) || (bytex == 2))
2943 if ((byte == 0) || (byte == 1))
2949 pDCTstat->TrwtTO = bytex;
/* TrwtWB = TrwtTO + 1: write-buffer flush variant of the read-to-write
 * turnaround. Must be called after Get_TrwtTO. */
2953 static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
2954 struct DCTStatStruc *pDCTstat)
2956 /* TrwtWB ensures read-to-write data-bus turnaround.
2957 This value should be one more than the programmed TrwtTO.*/
2958 pDCTstat->TrwtWB = pDCTstat->TrwtTO + 1;
/* Scan the DqsRcvEn delays of all valid DIMMs at 'index' (two byte lanes
 * per register: bits [7:0] and [23:16]) and report whether either lane's
 * max-min spread exceeds 31 (half a MEMCLK in these units).
 * NOTE(review): Smallest_*/Largest_* initialization, the per-iteration
 * assignments, and the return statements are elided in this excerpt. */
2962 static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat,
2963 u8 dct, u32 dev, u32 index_reg,
2966 u8 Smallest_0, Largest_0, Smallest_1, Largest_1;
2980 for (i=0; i < 8; i+=2) {
2981 if ( pDCTstat->DIMMValid & (1 << i)) {
2982 val = Get_NB32_index_wait(dev, index_reg, index);
2984 if (byte < Smallest_0)
2986 if (byte > Largest_0)
2989 byte = (val >> 16) & 0xFF;
2990 if (byte < Smallest_1)
2992 if (byte > Largest_1)
2999 /* check if total DqsRcvEn delay difference between any
3000 two DIMMs is less than half of a MEMCLK */
3001 if ((Largest_0 - Smallest_0) > 31)
3004 if ((Largest_1 - Smallest_1) > 31)
/* Compute the Critical Gross Delay Difference for DqsRcvEn across all
 * byte-lane pairs (indices 0x10, 0x11, 0x20, 0x21, plus the ECC lane 0x12
 * when ECC DIMMs are present). Each MaxMin result packs max in [7:0] and
 * min in [15:8]. Stores the overall max in pDCTstat->DqsRcvEnGrossL and
 * returns max - min. The running-max updates for 'Largest' are in elided
 * lines (only the 'Smallest' comparisons are visible). */
3010 static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
3011 u32 dev, u32 index_reg)
3013 u8 Smallest, Largest;
3017 /* The largest DqsRcvEnGrossDelay of any DIMM minus the
3018 DqsRcvEnGrossDelay of any other DIMM is equal to the Critical
3019 Gross Delay Difference (CGDD) */
3020 /* DqsRcvEn byte 1,0 */
3021 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x10);
3022 Largest = val & 0xFF;
3023 Smallest = (val >> 8) & 0xFF;
3025 /* DqsRcvEn byte 3,2 */
3026 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x11);
3028 bytex = (val >> 8) & 0xFF;
3029 if (bytex < Smallest)
3034 /* DqsRcvEn byte 5,4 */
3035 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x20);
3037 bytex = (val >> 8) & 0xFF;
3038 if (bytex < Smallest)
3043 /* DqsRcvEn byte 7,6 */
3044 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x21);
3046 bytex = (val >> 8) & 0xFF;
3047 if (bytex < Smallest)
3052 if (pDCTstat->DimmECCPresent> 0) {
3054 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x12);
3056 bytex = (val >> 8) & 0xFF;
3057 if (bytex < Smallest)
3063 pDCTstat->DqsRcvEnGrossL = Largest;
3064 return Largest - Smallest;
/* Compute the WrDatGrossDlyByte CGDD across DIMM0 (index 0x01) and DIMM1
 * (index 0x101); each MaxMin result packs max in [7:0], min in [15:8].
 * Stores the overall max in pDCTstat->WrDatGrossH and returns max - min.
 * The 'Largest' update for the DIMM1 result is in an elided line. */
3068 static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat,
3069 u8 dct, u32 dev, u32 index_reg)
3071 u8 Smallest, Largest;
3075 /* The largest WrDatGrossDlyByte of any DIMM minus the
3076 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
3077 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x01); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
3078 Largest = val & 0xFF;
3079 Smallest = (val >> 8) & 0xFF;
3080 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x101); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM1 */
3082 bytex = (val >> 8) & 0xFF;
3083 if (bytex < Smallest)
3088 // FIXME: Add Cx support.
3090 pDCTstat->WrDatGrossH = Largest;
3091 return Largest - Smallest;
/* Scan valid DIMMs at 'index' and return the max/min of the gross portion
 * of DqsRcvEn (bits [7:5] of each lane's field, hence the >>5 extracts).
 * Packing of max/min into the u16 return, the Largest updates, and the
 * min/max bookkeeping bodies are in elided lines. */
3094 static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
3095 u32 dev, u32 index_reg,
3098 u8 Smallest, Largest;
3111 for (i=0; i < 8; i+=2) {
3112 if ( pDCTstat->DIMMValid & (1 << i)) {
3113 val = Get_NB32_index_wait(dev, index_reg, index);
3115 byte = (val >> 5) & 0xFF;
3116 if (byte < Smallest)
3121 byte = (val >> (16 + 5)) & 0xFF;
3122 if (byte < Smallest)
/* Scan the two WrDatGrossDlyByte registers at 'index' (4 byte lanes each),
 * plus the ECC lane when present, and return the max/min gross delay.
 * Byte extraction, the Largest updates, and the u16 packing are in elided
 * lines. */
3138 static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
3139 u8 dct, u32 dev, u32 index_reg,
3142 u8 Smallest, Largest;
3150 for (i=0; i < 2; i++) {
3151 val = Get_NB32_index_wait(dev, index_reg, index);
3154 for (j=0; j < 4; j++) {
3156 if (byte < Smallest)
3165 if (pDCTstat->DimmECCPresent > 0) {
3167 val = Get_NB32_index_wait(dev, index_reg, index);
3171 if (byte < Smallest)
/* End-of-init cleanup: undo the temporary MSR settings applied by
 * mct_InitialMCT_D (cache-line-to-NB and WbEnhWsbDis). */
3186 static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
3187 struct DCTStatStruc *pDCTstat)
3189 print_t("\tmct_FinalMCT_D: Clr Cl, Wb\n");
3192 mct_ClrClToNB_D(pMCTstat, pDCTstat);
3193 mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
/* Start-of-init setup: apply the temporary MSR settings (cache-line-to-NB
 * disable and WbEnhWsbDis) that mct_FinalMCT_D later reverts. */
3197 static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat)
3199 print_t("\tmct_InitialMCT_D: Set Cl, Wb\n");
3200 mct_SetClToNB_D(pMCTstat, pDCTstat);
3201 mct_SetWbEnhWsbDis_D(pMCTstat, pDCTstat);
/* Return the node-present probe value; body elided in this excerpt. */
3205 static u32 mct_NodePresent_D(void)
/* Per-node init of the DCT status structure, and detection of extended
 * PCI configuration access via MSR bit 46 (EnableCf8ExtCfg region of the
 * NB Configuration MSR; the 'addr' assignment is elided here). Sets
 * SB_ExtConfig when available. */
3213 static void mct_init(struct MCTStatStruc *pMCTstat,
3214 struct DCTStatStruc *pDCTstat)
3219 pDCTstat->GangedMode = 0;
3220 pDCTstat->DRPresent = 1;
3222 /* enable extend PCI configuration access */
3224 _RDMSR(addr, &lo, &hi);
3225 if (hi & (1 << (46-32))) {
3226 pDCTstat->Status |= 1 << SB_ExtConfig;
3229 _WRMSR(addr, lo, hi);
/* Clear the LegacyBiosMode bit in a DCT configuration register (the 'reg'
 * assignment is elided in this excerpt). */
3234 static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
3235 struct DCTStatStruc *pDCTstat)
3239 u32 dev = pDCTstat->dev_dct;
3241 /* Clear Legacy BIOS Mode bit */
3243 val = Get_NB32(dev, reg);
3244 val &= ~(1<<LegacyBiosMode);
3245 Set_NB32(dev, reg, val);
/* Copy each node's DRAM base/limit from Node0's F1x40/44 map registers to
 * the per-node extended map registers F1x120/F1x124, and when a hardware
 * memory hole is active (GSB_HWHole), program the hoist base into the
 * hole register. Base/limit are rescaled from [39:19+?] by the >>(16+3)
 * shifts; the merge of Drambase/Dramlimit into 'val' is elided here. */
3249 static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
3250 struct DCTStatStruc *pDCTstatA)
3253 u32 Drambase, Dramlimit;
3259 struct DCTStatStruc *pDCTstat;
3261 pDCTstat = pDCTstatA + 0;
3262 dev = pDCTstat->dev_map;
3264 /* Copy dram map from F1x40/44,F1x48/4c,
3265 to F1x120/124(Node0),F1x120/124(Node1),...*/
3266 for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
3267 pDCTstat = pDCTstatA + Node;
3268 devx = pDCTstat->dev_map;
3270 /* get base/limit from Node0 */
3271 reg = 0x40 + (Node << 3); /* Node0/Dram Base 0 */
3272 val = Get_NB32(dev, reg);
3273 Drambase = val >> ( 16 + 3);
3275 reg = 0x44 + (Node << 3); /* Node0/Dram Limit 0 (note: register after Base) */
3276 val = Get_NB32(dev, reg);
3277 Dramlimit = val >> (16 + 3);
3279 /* set base/limit to F1x120/124 per Node */
3280 if (pDCTstat->NodePresent) {
3281 reg = 0x120; /* F1x120,DramBase[47:27] */
3282 val = Get_NB32(devx, reg);
3285 Set_NB32(devx, reg, val);
3288 val = Get_NB32(devx, reg);
3291 Set_NB32(devx, reg, val);
3293 if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
3295 val = Get_NB32(devx, reg);
3296 val |= (1 << DramMemHoistValid);
3297 val &= ~(0xFF << 24);
3298 dword = (pMCTstat->HoleBase >> (24 - 8)) & 0xFF;
3301 Set_NB32(devx, reg, val);
/* Tri-state unused chip selects (when motherboard termination allows it):
 * build a mask of in-use CS lines — registered DIMMs also claim the odd
 * partner of each populated even CS — invert it, and merge it into the
 * indexed DCT register ('index' assignment and merge are elided here). */
3308 static void SetCSTriState(struct MCTStatStruc *pMCTstat,
3309 struct DCTStatStruc *pDCTstat, u8 dct)
3312 u32 dev = pDCTstat->dev_dct;
3313 u32 index_reg = 0x98 + 0x100 * dct;
3318 /* Tri-state unused chipselects when motherboard
3319 termination is available */
3321 // FIXME: skip for Ax
3323 word = pDCTstat->CSPresent;
3324 if (pDCTstat->Status & (1 << SB_Registered)) {
3325 for (cs = 0; cs < 8; cs++) {
3326 if (word & (1 << cs)) {
3328 word |= 1 << (cs + 1);
3332 word = (~word) & 0xFF;
3334 val = Get_NB32_index_wait(dev, index_reg, index);
3336 Set_NB32_index_wait(dev, index_reg, index, val);
/* Tri-state unused CKE lines when motherboard termination is available.
 * The mapping from CSPresent bits into 'word' and the bit merges into
 * 'val' are elided in this excerpt. */
3341 static void SetCKETriState(struct MCTStatStruc *pMCTstat,
3342 struct DCTStatStruc *pDCTstat, u8 dct)
3346 u32 index_reg = 0x98 + 0x100 * dct;
3351 /* Tri-state unused CKEs when motherboard termination is available */
3353 // FIXME: skip for Ax
3355 dev = pDCTstat->dev_dct;
3357 for (cs = 0; cs < 8; cs++) {
3358 if (pDCTstat->CSPresent & (1 << cs)) {
3367 val = Get_NB32_index_wait(dev, index_reg, index);
3368 if ((word & 0x00FF) == 1)
3373 if ((word >> 8) == 1)
3378 Set_NB32_index_wait(dev, index_reg, index, val);
/* Tri-state unused ODT pins: start with all four tri-stated (odt = 0x0F)
 * and clear the bit for each populated CS pair; on quad-rank-capable
 * platforms also clear the extra pin for the odd rank. The final merge of
 * 'odt' into the indexed register value is elided here. */
3382 static void SetODTTriState(struct MCTStatStruc *pMCTstat,
3383 struct DCTStatStruc *pDCTstat, u8 dct)
3387 u32 index_reg = 0x98 + 0x100 * dct;
3393 // FIXME: skip for Ax
3395 dev = pDCTstat->dev_dct;
3397 /* Tri-state unused ODTs when motherboard termination is available */
3398 max_dimms = (u8) mctGet_NVbits(NV_MAX_DIMMS);
3399 odt = 0x0F; /* tristate all the pins then clear the used ones. */
3401 for (cs = 0; cs < 8; cs += 2) {
3402 if (pDCTstat->CSPresent & (1 << cs)) {
3403 odt &= ~(1 << (cs / 2));
3405 /* if quad-rank capable platform clear additional pins */
3406 if (max_dimms != MAX_CS_SUPPORTED) {
3407 if (pDCTstat->CSPresent & (1 << (cs + 1)))
3408 odt &= ~(4 << (cs / 2));
3414 val = Get_NB32_index_wait(dev, index_reg, index);
3416 Set_NB32_index_wait(dev, index_reg, index, val);
/* Build the DRAM phy drive-strength/slew compensation word from the
 * slew-rate lookup tables (selected per loop iteration; the switch/case
 * dispatch on 'i' is elided), pack each 5-bit value into 'dword', apply
 * the low-DIMM-count / DDR533-667 override, and write it to phy index
 * 0x0a. NOTE(review): the override forces index_reg to channel 0 — this
 * mirrors the upstream code; confirm intent against the full source. */
3421 static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
3422 struct DCTStatStruc *pDCTstat, u8 dct)
3425 u32 index_reg = 0x98 + 0x100 * dct;
3426 u32 dev = pDCTstat->dev_dct;
3432 val = Get_NB32_index_wait(dev, index_reg, 0x00);
3434 for (i=0; i < 6; i++) {
3438 p = Table_Comp_Rise_Slew_15x;
3439 valx = p[(val >> 16) & 3];
3443 p = Table_Comp_Fall_Slew_15x;
3444 valx = p[(val >> 16) & 3];
3447 p = Table_Comp_Rise_Slew_20x;
3448 valx = p[(val >> 8) & 3];
3451 p = Table_Comp_Fall_Slew_20x;
3452 valx = p[(val >> 8) & 3];
3456 dword |= valx << (5 * i);
3459 /* Override/Exception */
3460 if (!pDCTstat->GangedMode) {
3461 i = 0; /* use i for the dct setting required */
3462 if (pDCTstat->MAdimms[0] < 4)
3464 if (((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) && (pDCTstat->MAdimms[i] == 4))
3465 dword &= 0xF18FFF18;
3466 index_reg = 0x98; /* force dct = 0 */
3469 Set_NB32_index_wait(dev, index_reg, 0x0a, dword);
/* Busy-wait helper; body elided in this excerpt. */
3473 static void WaitRoutine_D(u32 time)
/* GH enhancement #18429: set EarlyArbEn in the Dram Control register
 * (F2x[1,0]78 bit 19) when the NB CLK : MemClk ratio check says early
 * arbitration is needed ('reg' assignment is elided here). */
3482 static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
3483 struct DCTStatStruc *pDCTstat)
3487 u32 dev = pDCTstat->dev_dct;
3489 /* GhEnhancement #18429 modified by askar: For low NB CLK :
3490 * Memclk ratio, the DCT may need to arbitrate early to avoid
3491 * unnecessary bubbles.
3492 * bit 19 of F2x[1,0]78 Dram Control Register, set this bit only when
3493 * NB CLK : Memclk ratio is between 3:1 (inclusive) to 4:5 (inclusive)
3497 val = Get_NB32(dev, reg);
3499 //FIXME: check for Cx
3500 if (CheckNBCOFEarlyArbEn(pMCTstat, pDCTstat))
3501 val |= (1 << EarlyArbEn);
3503 Set_NB32(dev, reg, val);
/* Decide whether EarlyArbEn should be set, based on the NB clock :
 * MemClk ratio computed from the P-state MSR (0xC0010071) and the DCT
 * MemClkFreq field. Returns 1 for ratios in [3:1, 4.5:1], else
 * (presumably) 0 on the elided paths.
 * NOTE(review): most of the ratio arithmetic (NBFid/NbDid extraction,
 * the divide producing 'val'/'rem'/'tmp') is elided in this excerpt.
 * The DCT1 fallback read uses 'reg * 0x100' where the surrounding code's
 * convention is 'reg + 0x100' — looks like a typo; confirm against the
 * full source / BKDG. */
3508 static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
3509 struct DCTStatStruc *pDCTstat)
3515 u32 dev = pDCTstat->dev_dct;
3519 /* Check if NB COF >= 4*Memclk, if it is not, return a fatal error
3522 /* 3*(Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
3523 _RDMSR(0xC0010071, &lo, &hi);
3529 val = Get_NB32(dev, reg);
3530 if (!(val & (1 << MemClkFreqVal)))
3531 val = Get_NB32(dev, reg * 0x100); /* get the DCT1 value */
3539 dev = pDCTstat->dev_nbmisc;
3541 val = Get_NB32(dev, reg);
3549 // Yes this could be nicer but this was how the asm was....
3550 if (val < 3) { /* NClk:MemClk < 3:1 */
3552 } else if (val > 4) { /* NClk:MemClk >= 5:1 */
3554 } else if ((val == 4) && (rem > tmp)) { /* NClk:MemClk > 4.5:1 */
3557 return 1; /* 3:1 <= NClk:MemClk <= 4.5:1*/
/* Zero the MCT status struct and each node's DCT status struct, while
 * preserving the two HostBiosSrvc fields across the wipe. The DCT wipe
 * is done in two byte ranges delimited by offsetof-style expressions
 * (&((struct DCTStatStruc *)0)->field), skipping the region between
 * CH_MaxRdLat[2] and CH_D_BC_RCVRDLY[2][4]; the 'start' init for the
 * first range and the p[i]=0 stores are elided in this excerpt. */
3562 static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
3563 struct DCTStatStruc *pDCTstatA)
3567 struct DCTStatStruc *pDCTstat;
3570 u16 host_serv1, host_serv2;
3572 /* Initialize Data structures by clearing all entries to 0 */
3573 p = (u8 *) pMCTstat;
3574 for (i = 0; i < sizeof(struct MCTStatStruc); i++) {
3578 for (Node = 0; Node < 8; Node++) {
3579 pDCTstat = pDCTstatA + Node;
3580 host_serv1 = pDCTstat->HostBiosSrvc1;
3581 host_serv2 = pDCTstat->HostBiosSrvc2;
3583 p = (u8 *) pDCTstat;
3585 stop = (u32)(&((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
3586 for (i = start; i < stop ; i++) {
3590 start = (u32)(&((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
3591 stop = sizeof(struct DCTStatStruc);
3592 for (i = start; i < stop; i++) {
3595 pDCTstat->HostBiosSrvc1 = host_serv1;
3596 pDCTstat->HostBiosSrvc2 = host_serv2;
/* Production-silicon pre-DRAM-init phy tweak: at DDR533/DDR667 (Speed 2
 * or 3), write a fixed sequence to the phy index/data ports of both
 * channels (0x98/0x9C and 0x198/0x19C). */
3601 static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
3602 struct DCTStatStruc *pDCTstat)
3606 u32 dev = pDCTstat->dev_dct;
3608 // FIXME: skip for Ax
3609 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
3610 for (i=0; i < 2; i++) {
3611 reg_off = 0x100 * i;
3612 Set_NB32(dev, 0x98 + reg_off, 0x0D000030);
3613 Set_NB32(dev, 0x9C + reg_off, 0x00000806);
3614 Set_NB32(dev, 0x98 + reg_off, 0x4D040F30);
/* Adjust the allowed DQS-position range at DDR533/DDR667; the adjustment
 * applied to *dqs_pos is in elided lines. */
3620 static void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
3621 struct DCTStatStruc *pDCTstat, u8 *dqs_pos)
3623 // FIXME: Skip for Ax
3624 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
/* Set ClLinesToNbDis in the relevant MSR ('msr' assignment elided),
 * disabling cache-line transfers to the NB during memory init. Reverted
 * by mct_ClrClToNB_D. */
3630 void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat,
3631 struct DCTStatStruc *pDCTstat)
3636 // FIXME: Maybe check the CPUID? - not for now.
3637 // pDCTstat->LogicalCPUID;
3640 _RDMSR(msr, &lo, &hi);
3641 lo |= 1 << ClLinesToNbDis;
3642 _WRMSR(msr, lo, hi);
/* Clear ClLinesToNbDis again — but only if ClToNB_flag is unset, i.e.
 * the bit was not already set before mct_SetClToNB_D ran. */
3646 void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
3647 struct DCTStatStruc *pDCTstat)
3653 // FIXME: Maybe check the CPUID? - not for now.
3654 // pDCTstat->LogicalCPUID;
3657 _RDMSR(msr, &lo, &hi);
3658 if (!pDCTstat->ClToNB_flag)
3659 lo &= ~(1<<ClLinesToNbDis);
3660 _WRMSR(msr, lo, hi);
/* Set WbEnhWsbDis_D in the high half of the relevant MSR ('msr'
 * assignment elided). Reverted by mct_ClrWbEnhWsbDis_D. */
3665 void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3666 struct DCTStatStruc *pDCTstat)
3671 // FIXME: Maybe check the CPUID? - not for now.
3672 // pDCTstat->LogicalCPUID;
3675 _RDMSR(msr, &lo, &hi);
3676 hi |= (1 << WbEnhWsbDis_D);
3677 _WRMSR(msr, lo, hi);
/* Clear WbEnhWsbDis_D in the high half of the relevant MSR ('msr'
 * assignment elided); counterpart of mct_SetWbEnhWsbDis_D. */
3681 void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3682 struct DCTStatStruc *pDCTstat)
3687 // FIXME: Maybe check the CPUID? - not for now.
3688 // pDCTstat->LogicalCPUID;
3691 _RDMSR(msr, &lo, &hi);
3692 hi &= ~(1 << WbEnhWsbDis_D);
3693 _WRMSR(msr, lo, hi);
/* Write Dram Config High (F2x[1,0]94) with the Bug#15114 / Erratum 177
 * compensation-safe sequence: (1) set DisAutoComp, (2) reset the Begin
 * Compensation bits via phy debug index 0x4D014F00, (3) perform the
 * frequency change by writing DramConfigHi, (4) re-enable automatic
 * compensation ('index' reset to the DisAutoComp register and the 'val'
 * staged for the debug write are in elided lines). */
3697 void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct,
3700 /* Bug#15114: Comp. update interrupted by Freq. change can cause
3701 * subsequent update to be invalid during any MemClk frequency change:
3702 * Solution: From the bug report:
3703 * 1. A software-initiated frequency change should be wrapped into the
3704 * following sequence :
3705 * - a) Disable Compensation (F2[1, 0]9C_x08[30] )
3706 * b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
3707 * c) Do frequency change
3708 * d) Enable Compensation (F2[1, 0]9C_x08[30] )
3709 * 2. A software-initiated Disable Compensation should always be
3710 * followed by step b) of the above steps.
3711 * Silicon Status: Fixed In Rev B0
3713 * Errata#177: DRAM Phy Automatic Compensation Updates May Be Invalid
3714 * Solution: BIOS should disable the phy automatic compensation prior
3715 * to initiating a memory clock frequency change as follows:
3716 * 1. Disable PhyAutoComp by writing 1'b1 to F2x[1, 0]9C_x08[30]
3717 * 2. Reset the Begin Compensation bits by writing 32'h0 to
3718 * F2x[1, 0]9C_x4D004F00
3719 * 3. Perform frequency change
3720 * 4. Enable PhyAutoComp by writing 1'b0 to F2x[1, 0]9C_08[30]
3721 * In addition, any time software disables the automatic phy
3722 * compensation it should reset the begin compensation bit per step 2.
3723 * Silicon Status: Fixed in DR-B0
3726 u32 dev = pDCTstat->dev_dct;
3727 u32 index_reg = 0x98 + 0x100 * dct;
3733 val = Get_NB32_index_wait(dev, index_reg, index);
3734 Set_NB32_index_wait(dev, index_reg, index, val | (1 << DisAutoComp));
3736 //FIXME: check for Bx Cx CPU
3737 // if Ax mct_SetDramConfigHi_Samp_D
3740 index = 0x4D014F00; /* F2x[1, 0]9C_x[D0FFFFF:D000000] DRAM Phy Debug Registers */
3741 index |= 1 << DctAccessWrite;
3743 Set_NB32_index_wait(dev, index_reg, index, val);
3745 Set_NB32(dev, 0x94 + 0x100 * dct, DramConfigHi);
3748 val = Get_NB32_index_wait(dev, index_reg, index);
3749 Set_NB32_index_wait(dev, index_reg, index, val & (~(1 << DisAutoComp)));
/* Pre-DQS-training errata pass over all nodes (Bug#15115 RdPtrInit,
 * Bug#15880 WrDqs fine-delay reset).
 * NOTE(review): as shown, only the mct_BeforeDQSTrain_Samp_D call is
 * guarded by the unbraced 'if (pDCTstat->NodePresent)'; the two
 * mct_ResetDLL_D calls run unconditionally for every node slot. Either
 * braces were elided from this excerpt or this is the known upstream
 * missing-braces bug — confirm against the full source. */
3752 static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
3753 struct DCTStatStruc *pDCTstatA)
3756 struct DCTStatStruc *pDCTstat;
3760 * Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
3762 * Solution: BIOS should program DRAM Control Register[RdPtrInit] =
3763 * 5h, (F2x[1, 0]78[3:0] = 5h).
3764 * Silicon Status: Fixed In Rev B0
3766 * Bug#15880: Determine validity of reset settings for DDR PHY timing.
3767 * Solutiuon: At least, set WrDqs fine delay to be 0 for DDR2 training.
3770 for (Node = 0; Node < 8; Node++) {
3771 pDCTstat = pDCTstatA + Node;
3773 if (pDCTstat->NodePresent)
3774 mct_BeforeDQSTrain_Samp_D(pMCTstat, pDCTstat);
3775 mct_ResetDLL_D(pMCTstat, pDCTstat, 0);
3776 mct_ResetDLL_D(pMCTstat, pDCTstat, 1);
/* Reset the DRAM phy DLL for each enabled receiver pair on 'dct':
 * prime the controller with a test-pattern read, then pulse phy register
 * F2x[1,0]9C_xD080F0C (write 0x8000, wait >=300ns, write 0, wait >=2us).
 * Skipped entirely on B3 silicon. HWCR.wrap32dis (MSR bit 17) is set
 * around the sequence so 64-bit memory references work in 32-bit mode,
 * and restored afterwards ('addr' for the MSR and the save/early-exit
 * paths are in elided lines). */
3781 static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
3782 struct DCTStatStruc *pDCTstat, u8 dct)
3785 u32 dev = pDCTstat->dev_dct;
3786 u32 reg_off = 0x100 * dct;
3792 /* Skip reset DLL for B3 */
3793 if (pDCTstat->LogicalCPUID & AMD_DR_B3) {
3798 _RDMSR(addr, &lo, &hi);
3799 if(lo & (1<<17)) { /* save the old value */
3802 lo |= (1<<17); /* HWCR.wrap32dis */
3803 lo &= ~(1<<15); /* SSEDIS */
3804 /* Setting wrap32dis allows 64-bit memory references in 32bit mode */
3805 _WRMSR(addr, lo, hi);
3808 pDCTstat->Channel = dct;
3809 Receiver = mct_InitReceiver_D(pDCTstat, dct);
3810 /* there are four receiver pairs, loosely associated with chipselects.*/
3811 for (; Receiver < 8; Receiver += 2) {
3812 if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver)) {
3813 addr = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, dct, Receiver, &valid);
3815 mct_Read1LTestPattern_D(pMCTstat, pDCTstat, addr); /* cache fills */
3817 /* Write 0000_8000h to register F2x[1,0]9C_xD080F0C */
3818 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00008000);
3819 mct_Wait(80); /* wait >= 300ns */
3821 /* Write 0000_0000h to register F2x[1,0]9C_xD080F0C */
3822 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00000000);
3823 mct_Wait(800); /* wait >= 2us */
3830 _RDMSR(addr, &lo, &hi);
3831 lo &= ~(1<<17); /* restore HWCR.wrap32dis */
3832 _WRMSR(addr, lo, hi);
/* In unganged mode, enable channel data interleaving (F2x110 bit 5,
 * DctDatIntlv) and set DisDatMask (NB Configuration High, bit 36 → bit 4
 * of the high dword at misc reg 0x8C). */
3837 static void mct_EnableDatIntlv_D(struct MCTStatStruc *pMCTstat,
3838 struct DCTStatStruc *pDCTstat)
3840 u32 dev = pDCTstat->dev_dct;
3843 /* Enable F2x110[DctDatIntlv] */
3844 // Call back not required mctHookBeforeDatIntlv_D()
3845 // FIXME Skip for Ax
3846 if (!pDCTstat->GangedMode) {
3847 val = Get_NB32(dev, 0x110);
3848 val |= 1 << 5; // DctDatIntlv
3849 Set_NB32(dev, 0x110, val);
3851 // FIXME Skip for Cx
3852 dev = pDCTstat->dev_nbmisc;
3853 val = Get_NB32(dev, 0x8C); // NB Configuration Hi
3854 val |= 1 << (36-32); // DisDatMask
3855 Set_NB32(dev, 0x8C, val);
/* Set F2x78[ChSetupSync] when exactly one channel has all-zero
 * AddrCmdSetup/CsOdtSetup/CkeSetup fields (mask 0x0202020 picks out the
 * three setup bits) and the other channel does not — i.e. the channels'
 * setup timings disagree. The bit-set into 'val' is elided here. */
3860 static void mct_SetupSync_D(struct MCTStatStruc *pMCTstat,
3861 struct DCTStatStruc *pDCTstat)
3863 /* set F2x78[ChSetupSync] when F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup,
3864 * CkeSetup] setups for one DCT are all 0s and at least one of the setups,
3865 * F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup, CkeSetup], of the other
3869 u32 dev = pDCTstat->dev_dct;
3872 cha = pDCTstat->CH_ADDR_TMG[0] & 0x0202020;
3873 chb = pDCTstat->CH_ADDR_TMG[1] & 0x0202020;
3875 if ((cha != chb) && ((cha == 0) || (chb == 0))) {
3876 val = Get_NB32(dev, 0x78);
3878 Set_NB32(dev, 0x78, val);
/* B2/B3 erratum workaround after DRAM init: wait 50us, and if DRAM came
 * up, temporarily clear Width128 in F2x[1,0]90, do a dummy phy CSR read
 * to flush, then restore Width128 when in ganged mode. */
3883 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
3886 u32 reg_off = 0x100 * dct;
3887 u32 dev = pDCTstat->dev_dct;
3889 if (pDCTstat->LogicalCPUID & (AMD_DR_B2 | AMD_DR_B3)) {
3890 mct_Wait(10000); /* Wait 50 us*/
3891 val = Get_NB32(dev, 0x110);
3892 if ( val & (1 << DramEnabled)) {
3893 /* If 50 us expires while DramEnable =0 then do the following */
3894 val = Get_NB32(dev, 0x90 + reg_off);
3895 val &= ~(1 << Width128); /* Program Width128 = 0 */
3896 Set_NB32(dev, 0x90 + reg_off, val);
3898 val = Get_NB32_index_wait(dev, 0x98 + reg_off, 0x05); /* Perform dummy CSR read to F2x09C_x05 */
3900 if (pDCTstat->GangedMode) {
3901 val = Get_NB32(dev, 0x90 + reg_off);
3902 val |= 1 << Width128; /* Program Width128 = 1 (restore; original comment said 0) */
3903 Set_NB32(dev, 0x90 + reg_off, val);
3910 /* ==========================================================
3911 * 6-bit Bank Addressing Table
3914 * CCC=Columns-9 binary
3915 * ==========================================================
3916 * DCT CCCBRR Rows Banks Columns 64-bit CS Size
3918 * 0000 000000 13 2 9 128MB
3919 * 0001 001000 13 2 10 256MB
3920 * 0010 001001 14 2 10 512MB
3921 * 0011 010000 13 2 11 512MB
3922 * 0100 001100 13 3 10 512MB
3923 * 0101 001101 14 3 10 1GB
3924 * 0110 010001 14 2 11 1GB
3925 * 0111 001110 15 3 10 2GB
3926 * 1000 010101 14 3 11 2GB
3927 * 1001 010110 15 3 11 4GB
3928 * 1010 001111 16 3 10 4GB
3929 * 1011 010111 16 3 11 8GB