2 * This file is part of the coreboot project.
4 * Copyright (C) 2010 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/* Forward declarations for the file-local helpers defined later in this file. */
/* Walks all nodes and sets SyncOnUcEccEn in MCA NB Configuration (F3x44)
 * on every node with ECC-enabled dram. */
22 static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat,
23 struct DCTStatStruc *pDCTstatA);
/* Reports whether the populated dram on this node has ECC enabled
 * (checks DimmEcEn in Dram Config Low, F2x90/F2x190). */
24 static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat);
26 /* Initialize ECC modes of Integrated Dram+Memory Controllers of a network of
27 * Hammer processors. Use Dram background scrubber to fast initialize ECC bits
32 * Order that items are set:
40 * Conditions for setting background scrubber.
42 * 2. node has dram functioning (WE=RE=1)
43 * 3. all eccdimms (or bit 17 of offset 90,fn 2)
44 * 4. no chip-select gap exists
46 * The dram background scrubber is used under very controlled circumstances to
47 * initialize all the ECC bits on the DIMMs of the entire dram address map
48 * (including hidden or lost dram and dram above 4GB). We will turn the scrub
49 * rate up to maximum, which should clear 4GB of dram in about 2.7 seconds.
50 * We will activate the scrubbers of all nodes with ecc dram and let them run in
51 * parallel, thereby reducing even further the time required to condition dram.
52 * Finally, we will go through each node and either disable background scrubber,
53 * or set the scrub rate to the user setup specified rate.
55 * To allow the NB to scrub, we need to wait a time period long enough to
56 * guarantee that the NB scrubs the entire dram on its node. To do this, we
57 * simply sample the scrub ADDR once, for an initial value, then we sample and poll until the polled value of scrub ADDR
58 * has wrapped around at least once: Scrub ADDRi+1 < Scrub ADDRi. Since we let all
59 * Nodes run in parallel, we need to guarantee that all nodes have wrapped. To do
60 * this efficiently, we need only to sample one of the nodes, the node with the
61 * largest amount of dram populated is the one which will take the longest amount
62 * of time (the scrub rate is set to max, the same rate, on all nodes). So,
63 * during setup of scrub Base, we determine how much memory and which node has
64 * the largest memory installed.
66 * Scrubbing should not ordinarily be enabled on a Node with a chip-select gap
67 * (aka SW memhole, cs hoisting, etc.). To init ECC memory on this node, the
68 * scrubber is used in two steps. First, the Dram Limit for the node is adjusted
69 * down to the bottom of the gap, and that ECC dram is initialized. Second, the
70 * original Limit is restored, the Scrub base is set to 4GB, and scrubber is
71 * allowed to run until the Scrub Addr wraps around to zero.
/* Top-level ECC init: pass 1 enables NB ECC (EccEn, F3x44[22]) and kicks off
 * memory-clear on every present node whose populated dram is ECC-capable;
 * pass 2 programs the background scrubber registers (F3x58/5C/60) with the
 * user-selected rates and clamps the C1 clock divisor when cache scrubbers
 * are active.
 * NOTE(review): this view of the file is gap-sampled — local declarations
 * (Node, dev, reg, val, nvbits, OB_*, OF_ScrubCTL, curBase, LDramECC) and
 * the function's return statement are in lines not shown here.
 */
73 u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
92 /* Construct these booleans, based on setup options, for easy handling
93 later in this procedure */
94 OB_NBECC = mctGet_NVbits(NV_NBECC); /* MCA ECC (MCE) enable bit */
96 OB_ECCRedir = mctGet_NVbits(NV_ECCRedir); /* ECC Redirection */
98 OB_ChipKill = mctGet_NVbits(NV_ChipKill); /* ECC Chip-kill mode */
/* Assemble the composite scrub-control word: Dcache rate in bits [20:16],
 * L2 rate in bits [12:8], dram rate in bits [4:0] — the layout of F3x58. */
100 OF_ScrubCTL = 0; /* Scrub CTL for Dcache, L2, and dram */
101 nvbits = mctGet_NVbits(NV_DCBKScrub);
102 /* mct_AdjustScrub_D(pDCTstatA, &nvbits); */ /* Need not adjust */
103 OF_ScrubCTL |= (u32) nvbits << 16;
105 nvbits = mctGet_NVbits(NV_L2BKScrub);
106 OF_ScrubCTL |= (u32) nvbits << 8;
108 nvbits = mctGet_NVbits(NV_DramBKScrub);
109 OF_ScrubCTL |= nvbits;
/* Pass 1: for each present node with dram, enable NB ECC and start
 * the dram content clear (which also initializes the ECC bits). */
113 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
114 struct DCTStatStruc *pDCTstat;
115 pDCTstat = pDCTstatA + Node;
117 if (NodePresent_D(Node)) { /*If Node is present */
118 dev = pDCTstat->dev_map;
119 reg = 0x40+(Node << 3); /* Dram Base Node 0 + index */
120 val = Get_NB32(dev, reg);
/* Bits [1:0] of the Dram Base register are WE/RE; both set means
 * this node's dram is mapped and functioning. */
122 /* WE/RE is checked */
123 if((val & 3)==3) { /* Node has dram populated */
124 /* Negate 'all nodes/dimms ECC' flag if non ecc
126 if( pDCTstat->Status & (1<<SB_ECCDIMMs)) {
127 LDramECC = isDramECCEn_D(pDCTstat);
/* A node that already hit an error forfeits ECC: clear the
 * per-node ECC-dimms flag and record that dram ECC is disabled. */
128 if(pDCTstat->ErrCode != SC_RunningOK) {
129 pDCTstat->Status &= ~(1 << SB_ECCDIMMs);
131 pDCTstat->ErrStatus |= (1 << SB_DramECCDis);
139 if(LDramECC) { /* if ECC is enabled on this dram */
141 mct_EnableDatIntlv_D(pMCTstat, pDCTstat);
142 dev = pDCTstat->dev_nbmisc;
143 reg =0x44; /* MCA NB Configuration */
144 val = Get_NB32(dev, reg);
145 val |= 1 << 22; /* EccEn */
146 Set_NB32(dev, reg, val);
/* Kick off the hardware memory clear for this node; nodes run
 * in parallel and are synced below. */
147 DCTMemClr_Init_D(pMCTstat, pDCTstat);
150 } /* this node has ECC enabled dram */
153 } /* Node has Dram */
/* Wait for the memory-clear engines started above to finish. */
156 MCTMemClrSync_D(pMCTstat, pDCTstatA);
158 } /* if Node present */
/* Record in the global status whether every populated node ended up
 * with ECC dimms (set) or not (cleared). */
162 pMCTstat->GStatus |= 1<<GSB_ECCDIMMs;
164 pMCTstat->GStatus &= ~(1<<GSB_ECCDIMMs);
166 /* Program the Dram BKScrub CTL to the proper (user selected) value.*/
/* Pass 2: program the background scrubber on each ECC-enabled node. */
168 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
169 struct DCTStatStruc *pDCTstat;
170 pDCTstat = pDCTstatA + Node;
172 if (NodePresent_D(Node)) { /* If Node is present */
173 reg = 0x40+(Node<<3); /* Dram Base Node 0 + index */
174 val = Get_NB32(pDCTstat->dev_map, reg);
/* Keep the node's dram base (upper bits) for the scrub address. */
175 curBase = val & 0xffff0000;
176 /*WE/RE is checked because memory config may have been */
177 if((val & 3)==3) { /* Node has dram populated */
178 if (isDramECCEn_D(pDCTstat)) { /* if ECC is enabled on this dram */
179 dev = pDCTstat->dev_nbmisc;
182 val |= (1<<0); /* enable redirection */
184 Set_NB32(dev, 0x5C, val); /* Dram Scrub Addr Low */
186 Set_NB32(dev, 0x60, val); /* Dram Scrub Addr High */
187 Set_NB32(dev, 0x58, OF_ScrubCTL); /*Scrub Control */
189 /* Divisor should not be set deeper than
190 * divide by 16 when Dcache scrubber or
191 * L2 scrubber is enabled.
/* Non-zero Dcache [20:16] or L2 [12:8] scrub rate means a cache
 * scrubber is active, so the C1 clock divisor must be clamped. */
193 if ((OF_ScrubCTL & (0x1F << 16)) || (OF_ScrubCTL & (0x1F << 8))) {
194 val = Get_NB32(dev, 0x84);
195 if ((val & 0xE0000000) > 0x80000000) { /* Get F3x84h[31:29]ClkDivisor for C1 */
196 val &= 0x1FFFFFFF; /* If ClkDivisor is deeper than divide-by-16 */
197 val |= 0x80000000; /* set it to divide-by-16 */
198 Set_NB32(dev, 0x84, val);
201 } /* this node has ECC enabled dram */
203 } /*if Node present */
/* Optionally make the NB sync flood on uncorrectable ECC errors. */
206 if(mctGet_NVbits(NV_SyncOnUnEccEn))
207 setSyncOnUnEccEn_D(pMCTstat, pDCTstatA);
/* Set SyncOnUcEccEn in the MCA NB Configuration register (F3x44) on every
 * present node whose populated dram has ECC enabled, so the NB sync floods
 * on an uncorrectable ECC error.
 * NOTE(review): gap-sampled view — local declarations (Node, dev, reg, val)
 * and the closing brace are in lines not shown here.
 */
213 static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat,
214 struct DCTStatStruc *pDCTstatA)
221 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
222 struct DCTStatStruc *pDCTstat;
223 pDCTstat = pDCTstatA + Node;
224 if (NodePresent_D(Node)) { /* If Node is present*/
225 reg = 0x40+(Node<<3); /* Dram Base Node 0 + index*/
226 val = Get_NB32(pDCTstat->dev_map, reg);
227 /*WE/RE is checked because memory config may have been*/
/* Bits [1:0] are WE/RE; both set means this node's dram is mapped. */
228 if((val & 3)==3) { /* Node has dram populated*/
229 if( isDramECCEn_D(pDCTstat)) {
230 /*if ECC is enabled on this dram*/
231 dev = pDCTstat->dev_nbmisc;
232 reg = 0x44; /* MCA NB Configuration*/
233 val = Get_NB32(dev, reg);
/* Read-modify-write: only the SyncOnUcEccEn bit is changed. */
234 val |= (1<<SyncOnUcEccEn);
235 Set_NB32(dev, reg, val);
238 } /* if Node present*/
242 static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat)
247 u32 dev = pDCTstat->dev_dct;
251 if(pDCTstat->GangedMode) {
256 for(i=0; i<ch_end; i++) {
257 if(pDCTstat->DIMMValidDCT[i] > 0){
258 reg = 0x90 + i * 0x100; /* Dram Config Low */
259 val = Get_NB32(dev, reg);
260 if(val & (1<<DimmEcEn)) {
261 /* set local flag 'dram ecc capable' */