* ordinarily in 64-bit mode.
*
* Trc precision does not use extra Jedec defined fractional component.
- * InsteadTrc (course) is rounded up to nearest 1 ns.
+ * Instead Trc (coarse) is rounded up to nearest 1 ns.
*
* Mini and Micro DIMM not supported. Only RDIMM, UDIMM, SO-DIMM defined types
* supported.
struct DCTStatStruc *pDCTstatA);
static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstatA);
-static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
- struct DCTStatStruc *pDCTstatA);
static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstatA);
static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct);
static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct);
-
+static u32 mct_DisDllShutdownSR(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u32 DramConfigLo, u8 dct);
+static void mct_EnDllShutdownSR(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
/*See mctAutoInitMCT header for index relationships to CL and T*/
static const u16 Table_F_k[] = {00,200,266,333,400,533 };
static const u8 Table_Comp_Fall_Slew_20x[] = {7, 5, 3, 2, 0xFF};
static const u8 Table_Comp_Fall_Slew_15x[] = {7, 7, 5, 3, 0xFF};
-void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
+static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstatA)
{
/*
* on setup options). It is the responsibility of PCI subsystem to
* create an uncacheable IO region below 4GB and to adjust TOP_MEM
* downward prior to any IO mapping or accesses. It is the same
- * responsibility of the CPU sub-system prior toaccessing LAPIC.
+ * responsibility of the CPU sub-system prior to accessing LAPIC.
*
* Slot Number is an external convention, and is determined by OEM with
* accompanying silk screening. OEM may choose to use Slot number
}
mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) ); // Node 0
- print_t("All Done\n");
+ print_tx("mctAutoInitMCT_D Done: Global Status: ", pMCTstat->GStatus);
return;
fatalexit:
nv_DQSTrainCTL = 1;
print_t("DQSTiming_D: mct_BeforeDQSTrain_D:\n");
- mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);;
+ mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);
phyAssistedMemFnceTraining(pMCTstat, pDCTstatA);
if (nv_DQSTrainCTL) {
- mctHookBeforeAnyTraining();
+ mctHookBeforeAnyTraining(pMCTstat, pDCTstatA);
print_t("DQSTiming_D: TrainReceiverEn_D FirstPass:\n");
TrainReceiverEn_D(pMCTstat, pDCTstatA, FirstPass);
}
}
-
+#ifdef UNUSED_CODE
+static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstatA)
{
}
}
}
-
+#endif
static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstatA)
base += NextBase;
limit += NextBase;
DramSelBaseAddr += NextBase;
- printk_debug(" Node: %02x base: %02x limit: %02x BottomIO: %02x\n", Node, base, limit, BottomIO);
+ printk(BIOS_DEBUG, " Node: %02x base: %02x limit: %02x BottomIO: %02x\n", Node, base, limit, BottomIO);
if (_MemHoleRemap) {
if ((base < BottomIO) && (limit >= BottomIO)) {
devx = pDCTstat->dev_map;
if (pDCTstat->NodePresent) {
- printk_debug(" Copy dram map from Node 0 to Node %02x \n", Node);
+ printk(BIOS_DEBUG, " Copy dram map from Node 0 to Node %02x \n", Node);
reg = 0x40; /*Dram Base 0*/
do {
val = Get_NB32(dev, reg);
{
/* Initiates a memory clear operation for all node. The mem clr
- * is done in paralel. After the memclr is complete, all processors
+ * is done in parallel. After the memclr is complete, all processors
* status are checked to ensure that memclr has completed.
*/
u8 Node;
if (val == dword) /* current nodeID = requested nodeID ? */
ret = 1;
finish:
- ;
+ ;
}
return ret;
all of the MemClkDis bits should also be set.*/
val = 0xFF000000;
Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
+ } else {
+ mct_EnDllShutdownSR(pMCTstat, pDCTstat, dct);
}
}
* HW memory clear process that the chip is capable of. The sooner
* that dram init is set for all nodes, the faster the memory system
* initialization can complete. Thus, the init loop is unrolled into
- * two loops so as to start the processeses for non BSP nodes sooner.
+ * two loops so as to start the processes for non BSP nodes sooner.
* This procedure will not wait for the process to finish.
* Synchronization is handled elsewhere.
*/
reg = 0x78 + reg_off;
val = Get_NB32(dev, reg);
/* Setting this bit forces a 1T window with hard left
- * pass/fail edge and a probabalistic right pass/fail
+ * pass/fail edge and a probabilistic right pass/fail
* edge. LEFT edge is referenced for final
* receiver enable position.*/
val |= 1 << DqsRcvEnTrain;
if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
pDCTstat->Speed = mctGet_NVbits(NV_MemCkVal) + 1;
- mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
}
+ mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
/* Gather all DIMM mini-max values for cycle timing data */
Rows = 0;
for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
LDIMM = i >> 1;
if (pDCTstat->DIMMValid & (1 << i)) {
- smbaddr = Get_DIMMAddress_D(pDCTstat, i);
+ smbaddr = Get_DIMMAddress_D(pDCTstat, dct + i);
byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
if (Rows < byte)
Rows = byte; /* keep track of largest row sz */
} else {
byte = mctRead_SPD(smbaddr, SPD_TRCRFC);
if (byte & 0xF0) {
- val++; /* round up in case fractional extention is non-zero.*/
+ val++; /* round up in case fractional extension is non-zero.*/
}
- }
- if (Trc < val)
- Trc = val;
-
- /* dev density=rank size/#devs per rank */
- byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
-
- val = ((byte >> 5) | (byte << 3)) & 0xFF;
- val <<= 2;
-
- byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE; /* dev density=2^(rows+columns+banks) */
- if (byte == 4) {
- val >>= 4;
- } else if (byte == 8) {
- val >>= 3;
- } else if (byte == 16) {
- val >>= 2;
- }
+ }
+ if (Trc < val)
+ Trc = val;
- byte = bsr(val);
+ /* dev density=rank size/#devs per rank */
+ byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
+
+ val = ((byte >> 5) | (byte << 3)) & 0xFF;
+ val <<= 2;
+
+ byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE; /* dev density=2^(rows+columns+banks) */
+ if (byte == 4) {
+ val >>= 4;
+ } else if (byte == 8) {
+ val >>= 3;
+ } else if (byte == 16) {
+ val >>= 2;
+ }
- if (Trfc[LDIMM] < byte)
- Trfc[LDIMM] = byte;
+ byte = bsr(val);
- byte = mctRead_SPD(smbaddr, SPD_TRAS);
- if (Tras < byte)
- Tras = byte;
+ if (Trfc[LDIMM] < byte)
+ Trfc[LDIMM] = byte;
+
+ byte = mctRead_SPD(smbaddr, SPD_TRAS);
+ if (Tras < byte)
+ Tras = byte;
} /* Dimm Present */
}
}
pDCTstat->Trp = val;
- /*Trrd*/
+ /*Trrd*/
dword = Trrd * 10;
pDCTstat->DIMMTrrd = dword;
val = dword / Tk40;
DramConfigMisc = 0;
DramConfigMisc2 = 0;
- /* set bank addessing and Masks, plus CS pops */
+ /* set bank addressing and Masks, plus CS pops */
SPDSetBanks_D(pMCTstat, pDCTstat, dct);
if (pDCTstat->ErrCode == SC_StopError)
goto AutoConfig_exit;
}
if (!(Status & (1 << SB_Registered)))
- DramConfigLo |= 1 << UnBuffDimm; /* Unbufferd DIMMs */
+ DramConfigLo |= 1 << UnBuffDimm; /* Unbuffered DIMMs */
if (mctGet_NVbits(NV_ECC_CAP))
if (Status & (1 << SB_ECCDIMMs))
if ( mctGet_NVbits(NV_ECC))
DramConfigLo |= 1 << DimmEcEn;
-
+ DramConfigLo = mct_DisDllShutdownSR(pMCTstat, pDCTstat, DramConfigLo, dct);
/* Build Dram Config Hi Register Value */
dword = pDCTstat->Speed;
reg = 0x40 + (q << 2) + reg_off; /* Base[q] reg.*/
val = Get_NB32(dev, reg);
if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
- reg = 0x60 + (q << 1) + reg_off; /*Mask[q] reg.*/
+ reg = 0x60 + ((q << 1) & 0xc) + reg_off; /*Mask[q] reg.*/
val = Get_NB32(dev, reg);
val >>= 19;
val++;
pDCTstat->DimmECCPresent |= 1 << i;
}
if (byte & JED_ADRCPAR) {
- /* DIMM is ECC capable */
- pDCTstat->DimmPARPresent |= 1 << i;
+ /* DIMM is ECC capable */
+ pDCTstat->DimmPARPresent |= 1 << i;
}
/* Check if x4 device */
devwidth = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;
// FIXME: for rev A: mct_BeforeDramInit_D(pDCTstat, dct);
/* Disable auto refresh before Dram init when in ganged mode (Erratum 278) */
- if (pDCTstat->LogicalCPUID & AMD_DR_LT_B2) {
+ if (pDCTstat->LogicalCPUID & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_BA)) {
if (pDCTstat->GangedMode) {
val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
val |= 1 << DisAutoRefresh;
* to ensure both DCTs are in sync (Erratum 278)
*/
- if (pDCTstat->LogicalCPUID & AMD_DR_LT_B2) {
+ if (pDCTstat->LogicalCPUID & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_BA)) {
if (pDCTstat->GangedMode) {
do {
val = Get_NB32(pDCTstat->dev_dct, 0x90 + (0x100 * dct));
val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
val &= ~(1 << DisAutoRefresh);
+ Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
val |= 1 << DisAutoRefresh;
+ Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
val &= ~(1 << DisAutoRefresh);
+ Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
}
}
}
if (byte != bytex) {
pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
} else {
- if ( mctGet_NVbits(NV_Unganged) )
- pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
+ if ( mctGet_NVbits(NV_Unganged) )
+ pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
pDCTstat->GangedMode = 1;
u32 Get_NB32(u32 dev, u32 reg)
{
- u32 addr;
-
- addr = (dev>>4) | (reg & 0xFF) | ((reg & 0xf00)<<16);
- outl((1<<31) | (addr & ~3), 0xcf8);
-
- return inl(0xcfc);
+ /* Read a 32-bit northbridge config register through the standard
+  * PCI accessor instead of hand-rolled type-1 cycles (0xcf8/0xcfc).
+  * NOTE(review): the removed code decoded 'dev' with a >>4 shift;
+  * assumes 'dev' is already in the encoding pci_read_config32()
+  * expects -- confirm against the coreboot PCI ops definition.
+  */
+ return pci_read_config32(dev, reg);
}
void Set_NB32(u32 dev, u32 reg, u32 val)
{
- u32 addr;
-
- addr = (dev>>4) | (reg & 0xFF) | ((reg & 0xf00)<<16);
- outl((1<<31) | (addr & ~3), 0xcf8);
- outl(val, 0xcfc);
+ /* Write a 32-bit northbridge config register through the standard
+  * PCI accessor; mirrors the Get_NB32() conversion above. */
+ pci_write_config32(dev, reg, val);
}
dword = bsr(pDCTstat->DIMMValid);
if (dword != val && dword != 0) {
/*the largest WrDatGrossDlyByte of any DIMM minus the
- WrDatGrossDlyByte of any other DIMM is equal to CGDD */
+ WrDatGrossDlyByte of any other DIMM is equal to CGDD */
val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
}
if (val == 0)
Largest = byte;
}
}
- index += 3;
+ index += 3;
} /* while ++i */
word = Smallest;
print_t("\tmct_FinalMCT_D: Clr Cl, Wb\n");
- mct_ClrClToNB_D(pMCTstat, pDCTstat);
+ /* ClrClToNB_D postponed until we're done executing from ROM */
mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
}
dev = pDCTstat->dev_map;
/* Copy dram map from F1x40/44,F1x48/4c,
- to F1x120/124(Node0),F1x120/124(Node1),...*/
+ to F1x120/124(Node0),F1x120/124(Node1),...*/
for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
pDCTstat = pDCTstatA + Node;
devx = pDCTstat->dev_map;
}
-
+#ifdef UNUSED_CODE
static void SetCKETriState(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct)
{
Set_NB32_index_wait(dev, index_reg, index, val);
}
-
+#endif
static void SetODTTriState(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct)
u8 max_dimms;
// FIXME: skip for Ax
-
+
dev = pDCTstat->dev_dct;
/* Tri-state unused ODTs when motherboard termination is available */
if (pDCTstat->CSPresent & (1 << cs)) {
odt &= ~(1 << (cs / 2));
- /* if quad-rank capable platform clear adtitional pins */
+ /* if quad-rank capable platform clear additional pins */
if (max_dimms != MAX_CS_SUPPORTED) {
if (pDCTstat->CSPresent & (1 << (cs + 1)))
odt &= ~(4 << (cs / 2));
i = 0; /* use i for the dct setting required */
if (pDCTstat->MAdimms[0] < 4)
i = 1;
- if (((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) && (pDCTstat->MAdimms[i] == 4))
+ if (((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) && (pDCTstat->MAdimms[i] == 4)) {
dword &= 0xF18FFF18;
index_reg = 0x98; /* force dct = 0 */
+ }
}
Set_NB32_index_wait(dev, index_reg, 0x0a, dword);
u8 Node;
u32 i;
struct DCTStatStruc *pDCTstat;
- u16 start, stop;
+ u32 start, stop;
u8 *p;
u16 host_serv1, host_serv2;
p = (u8 *) pDCTstat;
start = 0;
- stop = ((u16) &((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
+ stop = (u32)(&((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
for (i = start; i < stop ; i++) {
p[i] = 0;
}
- start = ((u16) &((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
+ start = (u32)(&((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
stop = sizeof(struct DCTStatStruc);
for (i = start; i < stop; i++) {
p[i] = 0;
}
-void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
+static void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 *dqs_pos)
{
// FIXME: Skip for Ax
}
}
+/* Keep the DRAM phy DLL from shutting down during self-refresh.
+ * On DA-C2/RB-C3 parts, program the phy through the F2x[1, 0]98/9C
+ * indirect register pair, then return DramConfigLo with the
+ * DisDllShutdownSR bit (bit 27) set so the caller writes it back.
+ * NOTE(review): 0x9C appears to be the indirect data port and 0x98
+ * the offset/command port (data staged first, write then triggered
+ * by the 0x98 access) -- confirm against the Fam10h BKDG.
+ */
+static u32 mct_DisDllShutdownSR(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u32 DramConfigLo, u8 dct)
+{
+ u32 reg_off = 0x100 * dct;
+ u32 dev = pDCTstat->dev_dct;
+
+ /* Write 0000_07D0h to F2x[1, 0]98_x4D0FE006 and 0000_0190h to
+  * F2x[1, 0]98_x4D0FE007 */
+ if (pDCTstat->LogicalCPUID & (AMD_DA_C2 | AMD_RB_C3)) {
+ Set_NB32(dev, 0x9C + reg_off, 0x7D0);
+ Set_NB32(dev, 0x98 + reg_off, 0x4D0FE006);
+ Set_NB32(dev, 0x9C + reg_off, 0x190);
+ Set_NB32(dev, 0x98 + reg_off, 0x4D0FE007);
+ }
+
+ return DramConfigLo | /* DisDllShutdownSR */ 1 << 27;
+}
+
+/* Re-enable DRAM phy DLL shutdown during self-refresh: undo the phy
+ * programming done by mct_DisDllShutdownSR() on DA-C2/RB-C3 parts and
+ * clear the DisDllShutdownSR bit (bit 27) in F2x[1, 0]90.
+ */
+static void mct_EnDllShutdownSR(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 reg_off = 0x100 * dct;
+ u32 dev = pDCTstat->dev_dct, val;
+
+ /* Write 0000_001Ch to F2x[1, 0]98_x4D0FE006 and 0000_013Dh to
+  * F2x[1, 0]98_x4D0FE007 (the old comment claiming 0000_07D0h was
+  * copy-pasted from the disable path) */
+ if (pDCTstat->LogicalCPUID & (AMD_DA_C2 | AMD_RB_C3)) {
+ Set_NB32(dev, 0x9C + reg_off, 0x1C);
+ Set_NB32(dev, 0x98 + reg_off, 0x4D0FE006);
+ Set_NB32(dev, 0x9C + reg_off, 0x13D);
+ Set_NB32(dev, 0x98 + reg_off, 0x4D0FE007);
+
+ val = Get_NB32(dev, 0x90 + reg_off);
+ val &= ~(1 << 27/* DisDllShutdownSR */);
+ Set_NB32(dev, 0x90 + reg_off, val);
+ }
+}
void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat)
Set_NB32_index_wait(dev, index_reg, index, val | (1 << DisAutoComp));
//FIXME: check for Bx Cx CPU
- // if Ax mct_SetDramConfigHi_Samp_D
+ // if Ax mct_SetDramConfigHi_Samp_D
/* errata#177 */
index = 0x4D014F00; /* F2x[1, 0]9C_x[D0FFFFF:D000000] DRAM Phy Debug Registers */
* Silicon Status: Fixed In Rev B0
*
* Bug#15880: Determine validity of reset settings for DDR PHY timing.
- * Solutiuon: At least, set WrDqs fine delay to be 0 for DDR2 training.
+ * Solution: At least, set WrDqs fine delay to be 0 for DDR2 training.
*/
for (Node = 0; Node < 8; Node++) {
pDCTstat = pDCTstatA + Node;
- if (pDCTstat->NodePresent)
+ if (pDCTstat->NodePresent) {
mct_BeforeDQSTrain_Samp_D(pMCTstat, pDCTstat);
mct_ResetDLL_D(pMCTstat, pDCTstat, 0);
mct_ResetDLL_D(pMCTstat, pDCTstat, 1);
-
+ }
}
}
}
}
-
+#ifdef UNUSED_CODE
static void mct_SetupSync_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat)
{
Set_NB32(dev, 0x78, val);
}
}
+#endif
static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {