1 #include <cpu/k8/mtrr.h>
/* AMD K8 DRAM controller (PCI function 2) register offsets and field
 * encodings for the DRAM Timing Low/High registers.  For each timing
 * field: *_SHIFT/*_MASK locate the field, *_BASE is the bus-clock count
 * encoded by a raw value of 0, and *_MIN/*_MAX bound the legal clock
 * counts (not the raw register values).
 */
5 #define DRAM_CSBASE 0x40
6 #define DRAM_CSMASK 0x60
7 #define DRAM_BANK_ADDR_MAP 0x80
8 #define DRAM_TIMING_LOW 0x88
9 #define DTL_TCL_SHIFT 0
10 #define DTL_TCL_MASK 0x7
14 #define DTL_TRC_SHIFT 4
15 #define DTL_TRC_MASK 0xf
16 #define DTL_TRC_BASE 7
18 #define DTL_TRC_MAX 22
19 #define DTL_TRFC_SHIFT 8
20 #define DTL_TRFC_MASK 0xf
21 #define DTL_TRFC_BASE 9
22 #define DTL_TRFC_MIN 9
23 #define DTL_TRFC_MAX 24
24 #define DTL_TRCD_SHIFT 12
25 #define DTL_TRCD_MASK 0x7
26 #define DTL_TRCD_BASE 0
27 #define DTL_TRCD_MIN 2
28 #define DTL_TRCD_MAX 6
29 #define DTL_TRRD_SHIFT 16
30 #define DTL_TRRD_MASK 0x7
31 #define DTL_TRRD_BASE 0
32 #define DTL_TRRD_MIN 2
33 #define DTL_TRRD_MAX 4
34 #define DTL_TRAS_SHIFT 20
35 #define DTL_TRAS_MASK 0xf
36 #define DTL_TRAS_BASE 0
37 #define DTL_TRAS_MIN 5
38 #define DTL_TRAS_MAX 15
39 #define DTL_TRP_SHIFT 24
40 #define DTL_TRP_MASK 0x7
41 #define DTL_TRP_BASE 0
44 #define DTL_TWR_SHIFT 28
45 #define DTL_TWR_MASK 0x1
46 #define DTL_TWR_BASE 2
/* DRAM Timing High (0x8c) fields: Twtr, Trwt, Tref refresh-rate codes
 * (selected per memclk and row count in get_mem_param's table), Twcl. */
49 #define DRAM_TIMING_HIGH 0x8c
50 #define DTH_TWTR_SHIFT 0
51 #define DTH_TWTR_MASK 0x1
52 #define DTH_TWTR_BASE 1
53 #define DTH_TWTR_MIN 1
54 #define DTH_TWTR_MAX 2
55 #define DTH_TRWT_SHIFT 4
56 #define DTH_TRWT_MASK 0x7
57 #define DTH_TRWT_BASE 1
58 #define DTH_TRWT_MIN 1
59 #define DTH_TRWT_MAX 6
60 #define DTH_TREF_SHIFT 8
61 #define DTH_TREF_MASK 0x1f
62 #define DTH_TREF_100MHZ_4K 0x00
63 #define DTH_TREF_133MHZ_4K 0x01
64 #define DTH_TREF_166MHZ_4K 0x02
65 #define DTH_TREF_200MHZ_4K 0x03
66 #define DTH_TREF_100MHZ_8K 0x08
67 #define DTH_TREF_133MHZ_8K 0x09
68 #define DTH_TREF_166MHZ_8K 0x0A
69 #define DTH_TREF_200MHZ_8K 0x0B
70 #define DTH_TWCL_SHIFT 20
71 #define DTH_TWCL_MASK 0x7
72 #define DTH_TWCL_BASE 1
73 #define DTH_TWCL_MIN 1
74 #define DTH_TWCL_MAX 2
/* DRAM Config Low (0x90) single-bit flags, named after the fields in
 * the K8 BKDG.  DCL_x4DIMM_SHIFT positions the 4-bit "DIMM n is x4"
 * field at bits [23:20]. */
75 #define DRAM_CONFIG_LOW 0x90
76 #define DCL_DLL_Disable (1<<0)
77 #define DCL_D_DRV (1<<1)
78 #define DCL_QFC_EN (1<<2)
79 #define DCL_DisDqsHys (1<<3)
80 #define DCL_DramInit (1<<8)
81 #define DCL_DramEnable (1<<10)
82 #define DCL_MemClrStatus (1<<11)
83 #define DCL_ESR (1<<12)
84 #define DCL_SRS (1<<13)
85 #define DCL_128BitEn (1<<16)
86 #define DCL_DimmEccEn (1<<17)
87 #define DCL_UnBufDimm (1<<18)
88 #define DCL_32ByteEn (1<<19)
89 #define DCL_x4DIMM_SHIFT 20
/* DRAM Config High (0x94): async latency, read preamble (encoded in
 * half-ns: value = ns*2, hence the ((n<<1)+frac) forms below), idle
 * cycle limit codes, MEMCLK frequency select, and per-DIMM clock
 * enable bits. */
90 #define DRAM_CONFIG_HIGH 0x94
91 #define DCH_ASYNC_LAT_SHIFT 0
92 #define DCH_ASYNC_LAT_MASK 0xf
93 #define DCH_ASYNC_LAT_BASE 0
94 #define DCH_ASYNC_LAT_MIN 0
95 #define DCH_ASYNC_LAT_MAX 15
96 #define DCH_RDPREAMBLE_SHIFT 8
97 #define DCH_RDPREAMBLE_MASK 0xf
98 #define DCH_RDPREAMBLE_BASE ((2<<1)+0) /* 2.0 ns */
99 #define DCH_RDPREAMBLE_MIN ((2<<1)+0) /* 2.0 ns */
100 #define DCH_RDPREAMBLE_MAX ((9<<1)+1) /* 9.5 ns */
101 #define DCH_IDLE_LIMIT_SHIFT 16
102 #define DCH_IDLE_LIMIT_MASK 0x7
103 #define DCH_IDLE_LIMIT_0 0
104 #define DCH_IDLE_LIMIT_4 1
105 #define DCH_IDLE_LIMIT_8 2
106 #define DCH_IDLE_LIMIT_16 3
107 #define DCH_IDLE_LIMIT_32 4
108 #define DCH_IDLE_LIMIT_64 5
109 #define DCH_IDLE_LIMIT_128 6
110 #define DCH_IDLE_LIMIT_256 7
111 #define DCH_DYN_IDLE_CTR_EN (1 << 19)
112 #define DCH_MEMCLK_SHIFT 20
113 #define DCH_MEMCLK_MASK 0x7
114 #define DCH_MEMCLK_100MHZ 0
115 #define DCH_MEMCLK_133MHZ 2
116 #define DCH_MEMCLK_166MHZ 5
117 #define DCH_MEMCLK_200MHZ 7
118 #define DCH_MEMCLK_VALID (1 << 25)
119 #define DCH_MEMCLK_EN0 (1 << 26)
120 #define DCH_MEMCLK_EN1 (1 << 27)
121 #define DCH_MEMCLK_EN2 (1 << 28)
122 #define DCH_MEMCLK_EN3 (1 << 29)
/* ECC scrubber registers (PCI function 3).  SCRUB_* values encode the
 * interval between scrub events for the DRAM/L2/L1D scrubbers; larger
 * codes mean longer intervals (lower scrub bandwidth). */
125 #define SCRUB_CONTROL 0x58
129 #define SCRUB_160ns 3
130 #define SCRUB_320ns 4
131 #define SCRUB_640ns 5
132 #define SCRUB_1_28us 6
133 #define SCRUB_2_56us 7
134 #define SCRUB_5_12us 8
135 #define SCRUB_10_2us 9
136 #define SCRUB_20_5us 10
137 #define SCRUB_41_0us 11
138 #define SCRUB_81_9us 12
139 #define SCRUB_163_8us 13
140 #define SCRUB_327_7us 14
141 #define SCRUB_655_4us 15
142 #define SCRUB_1_31ms 16
143 #define SCRUB_2_62ms 17
144 #define SCRUB_5_24ms 18
145 #define SCRUB_10_49ms 19
146 #define SCRUB_20_97ms 20
147 #define SCRUB_42ms 21
148 #define SCRUB_84ms 22
/* NOTE(review): "SHFIT" is a long-standing typo for "SHIFT"; the name
 * is kept as-is because callers outside this view may reference it. */
149 #define SC_DRAM_SCRUB_RATE_SHFIT 0
150 #define SC_DRAM_SCRUB_RATE_MASK 0x1f
151 #define SC_L2_SCRUB_RATE_SHIFT 8
152 #define SC_L2_SCRUB_RATE_MASK 0x1f
153 #define SC_L1D_SCRUB_RATE_SHIFT 16
154 #define SC_L1D_SCRUB_RATE_MASK 0x1f
155 #define SCRUB_ADDR_LOW 0x5C
156 #define SCRUB_ADDR_HIGH 0x60
/* Northbridge Capabilities register (PCI function 3, offset 0xE8).
 * Read-only bits describing what this memory controller supports.
 * Bit layout (low bits): 0 = 128-bit (dual channel) capable,
 * 1 = MP capable, 2 = Big MP, 3 = ECC capable, 4 = Chipkill ECC,
 * [7:5] = maximum memory clock, 8 = memory controller present.
 */
#define NORTHBRIDGE_CAP 0xE8
#define NBCAP_128Bit         0x0001
#define NBCAP_MP             0x0002
#define NBCAP_BIG_MP         0x0004
/* BUGFIX: was 0x0004, which collided with NBCAP_BIG_MP; ECC capable is
 * bit 3 (0x0008) per the K8 BKDG, consistent with Chipkill at bit 4. */
#define NBCAP_ECC            0x0008
#define NBCAP_CHIPKILL_ECC   0x0010
#define NBCAP_MEMCLK_SHIFT   5
#define NBCAP_MEMCLK_MASK    3
#define NBCAP_MEMCLK_100MHZ  3
#define NBCAP_MEMCLK_133MHZ  2
#define NBCAP_MEMCLK_166MHZ  1
#define NBCAP_MEMCLK_200MHZ  0
#define NBCAP_MEMCTRL        0x0100
/* setup_resource_map() - program a table of PCI config registers.
 *
 * register_values[] is consumed in triplets:
 *   [i+0] packed PCI address (device in the upper bits, register
 *         offset in the low 8 bits),
 *   [i+1] AND mask applied to the current register contents,
 *   [i+2] OR value merged in afterwards.
 * max is the total number of array entries (3 per register).
 *
 * NOTE(review): two read-modify-write sequences appear below; lines
 * are elided between them in this view, so they are presumably
 * alternatives selected by preprocessor conditionals -- confirm
 * against the full source.
 */
172 static void setup_resource_map(const unsigned int *register_values, int max)
175 print_debug("setting up resource map....\r\n");
176 for(i = 0; i < max; i += 3) {
181 print_debug_hex32(register_values[i]);
183 print_debug_hex32(register_values[i+2]);
/* Split the packed PCI address into device and register offset. */
186 dev = register_values[i] & ~0xff;
187 where = register_values[i] & 0xff;
188 reg = pci_read_config32(dev, where);
189 reg &= register_values[i+1];
190 reg |= register_values[i+2];
191 pci_write_config32(dev, where, reg);
/* Alternate sequence: only OR in bits that the AND mask cleared. */
193 reg = pci_read_config32(register_values[i]);
194 reg &= register_values[i+1];
195 reg |= register_values[i+2] & ~register_values[i+1];
196 pci_write_config32(register_values[i], reg);
199 print_debug("done.\r\n");
/* setup_default_resource_map() - load the boot-strap processor's
 * address map: DRAM base/limit, MMIO base/limit, PCI I/O base/limit
 * and config-space routing registers on node 0 (device 0x18,
 * function 1), then hand the triplet table to setup_resource_map().
 * Limits are written before bases because the base registers carry
 * the enable bits.
 */
202 static void setup_default_resource_map(void)
204 static const unsigned int register_values[] = {
205 /* Careful set limit registers before base registers which contain the enables */
206 /* DRAM Limit i Registers
215 * [ 2: 0] Destination Node ID
225 * [10: 8] Interleave select
226 * specifies the values of A[14:12] to use with interleave enable.
228 * [31:16] DRAM Limit Address i Bits 39-24
229 * This field defines the upper address bits of a 40 bit address
230 * that define the end of the DRAM region.
232 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
233 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
234 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
235 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
236 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
237 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
238 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
239 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
240 /* DRAM Base i Registers
249 * [ 0: 0] Read Enable
252 * [ 1: 1] Write Enable
253 * 0 = Writes Disabled
256 * [10: 8] Interleave Enable
257 * 000 = No interleave
258 * 001 = Interleave on A[12] (2 nodes)
260 * 011 = Interleave on A[12] and A[14] (4 nodes)
264 * 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
266 * [13:16] DRAM Base Address i Bits 39-24
267 * This field defines the upper address bits of a 40-bit address
268 * that define the start of the DRAM region.
270 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
271 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
272 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
273 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
274 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
275 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
276 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
277 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
279 /* Memory-Mapped I/O Limit i Registers
288 * [ 2: 0] Destination Node ID
298 * [ 5: 4] Destination Link ID
305 * 0 = CPU writes may be posted
306 * 1 = CPU writes must be non-posted
307 * [31: 8] Memory-Mapped I/O Limit Address i (39-16)
308 * This field defines the upper address bits of a 40-bit address that
309 * defines the end of a memory-mapped I/O region n
311 PCI_ADDR(0, 0x18, 1, 0x84), 0x00000048, 0x00000000,
312 PCI_ADDR(0, 0x18, 1, 0x8C), 0x00000048, 0x00000000,
313 PCI_ADDR(0, 0x18, 1, 0x94), 0x00000048, 0x00000000,
314 PCI_ADDR(0, 0x18, 1, 0x9C), 0x00000048, 0x00000000,
315 PCI_ADDR(0, 0x18, 1, 0xA4), 0x00000048, 0x00000000,
316 PCI_ADDR(0, 0x18, 1, 0xAC), 0x00000048, 0x00000000,
317 PCI_ADDR(0, 0x18, 1, 0xB4), 0x00000048, 0x00000000,
318 PCI_ADDR(0, 0x18, 1, 0xBC), 0x00000048, 0x00ffff00,
320 /* Memory-Mapped I/O Base i Registers
329 * [ 0: 0] Read Enable
332 * [ 1: 1] Write Enable
333 * 0 = Writes disabled
335 * [ 2: 2] Cpu Disable
336 * 0 = Cpu can use this I/O range
337 * 1 = Cpu requests do not use this I/O range
339 * 0 = base/limit registers i are read/write
340 * 1 = base/limit registers i are read-only
342 * [31: 8] Memory-Mapped I/O Base Address i (39-16)
343 * This field defines the upper address bits of a 40bit address
344 * that defines the start of memory-mapped I/O region i
346 PCI_ADDR(0, 0x18, 1, 0x80), 0x000000f0, 0x00000000,
347 PCI_ADDR(0, 0x18, 1, 0x88), 0x000000f0, 0x00000000,
348 PCI_ADDR(0, 0x18, 1, 0x90), 0x000000f0, 0x00000000,
349 PCI_ADDR(0, 0x18, 1, 0x98), 0x000000f0, 0x00000000,
350 PCI_ADDR(0, 0x18, 1, 0xA0), 0x000000f0, 0x00000000,
351 PCI_ADDR(0, 0x18, 1, 0xA8), 0x000000f0, 0x00000000,
352 PCI_ADDR(0, 0x18, 1, 0xB0), 0x000000f0, 0x00000000,
353 PCI_ADDR(0, 0x18, 1, 0xB8), 0x000000f0, 0x00fc0003,
355 /* PCI I/O Limit i Registers
360 * [ 2: 0] Destination Node ID
370 * [ 5: 4] Destination Link ID
376 * [24:12] PCI I/O Limit Address i
377 * This field defines the end of PCI I/O region n
380 PCI_ADDR(0, 0x18, 1, 0xC4), 0xFE000FC8, 0x01fff000,
381 PCI_ADDR(0, 0x18, 1, 0xCC), 0xFE000FC8, 0x00000000,
382 PCI_ADDR(0, 0x18, 1, 0xD4), 0xFE000FC8, 0x00000000,
383 PCI_ADDR(0, 0x18, 1, 0xDC), 0xFE000FC8, 0x00000000,
385 /* PCI I/O Base i Registers
390 * [ 0: 0] Read Enable
393 * [ 1: 1] Write Enable
394 * 0 = Writes Disabled
398 * 0 = VGA matches Disabled
399 * 1 = matches all address < 64K and where A[9:0] is in the
400 * range 3B0-3BB or 3C0-3DF independent of the base & limit registers
402 * 0 = ISA matches Disabled
403 * 1 = Blocks address < 64K and in the last 768 bytes of each 1K block
404 * from matching against this base/limit pair
406 * [24:12] PCI I/O Base i
407 * This field defines the start of PCI I/O region n
410 PCI_ADDR(0, 0x18, 1, 0xC0), 0xFE000FCC, 0x00000003,
411 PCI_ADDR(0, 0x18, 1, 0xC8), 0xFE000FCC, 0x00000000,
412 PCI_ADDR(0, 0x18, 1, 0xD0), 0xFE000FCC, 0x00000000,
413 PCI_ADDR(0, 0x18, 1, 0xD8), 0xFE000FCC, 0x00000000,
415 /* Config Base and Limit i Registers
420 * [ 0: 0] Read Enable
423 * [ 1: 1] Write Enable
424 * 0 = Writes Disabled
426 * [ 2: 2] Device Number Compare Enable
427 * 0 = The ranges are based on bus number
428 * 1 = The ranges are ranges of devices on bus 0
430 * [ 6: 4] Destination Node
440 * [ 9: 8] Destination Link
446 * [23:16] Bus Number Base i
447 * This field defines the lowest bus number in configuration region i
448 * [31:24] Bus Number Limit i
449 * This field defines the highest bus number in configuration region i
451 PCI_ADDR(0, 0x18, 1, 0xE0), 0x0000FC88, 0xff000003,
452 PCI_ADDR(0, 0x18, 1, 0xE4), 0x0000FC88, 0x00000000,
453 PCI_ADDR(0, 0x18, 1, 0xE8), 0x0000FC88, 0x00000000,
454 PCI_ADDR(0, 0x18, 1, 0xEC), 0x0000FC88, 0x00000000,
457 max = sizeof(register_values)/sizeof(register_values[0]);
458 setup_resource_map(register_values, max);
/* sdram_set_registers() - program the per-node defaults for the DRAM
 * controller before SPD-based sizing/timing runs: address map
 * (function 1), chip-select base/mask, bank address map, timing
 * low/high, config low/high, delay line (function 2) and the scrub /
 * IOMMU-aperture registers (function 3).  The table is consumed in
 * (PCI address, AND mask, OR value) triplets like setup_resource_map,
 * but each device address is relocated from node 0 to the node owned
 * by ctrl (see the dev computation in the loop below).
 */
461 static void sdram_set_registers(const struct mem_controller *ctrl)
463 static const unsigned int register_values[] = {
465 /* Careful set limit registers before base registers which contain the enables */
466 /* DRAM Limit i Registers
475 * [ 2: 0] Destination Node ID
485 * [10: 8] Interleave select
486 * specifies the values of A[14:12] to use with interleave enable.
488 * [31:16] DRAM Limit Address i Bits 39-24
489 * This field defines the upper address bits of a 40 bit address
490 * that define the end of the DRAM region.
492 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
493 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
494 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
495 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
496 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
497 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
498 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
499 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
500 /* DRAM Base i Registers
509 * [ 0: 0] Read Enable
512 * [ 1: 1] Write Enable
513 * 0 = Writes Disabled
516 * [10: 8] Interleave Enable
517 * 000 = No interleave
518 * 001 = Interleave on A[12] (2 nodes)
520 * 011 = Interleave on A[12] and A[14] (4 nodes)
524 * 111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
526 * [13:16] DRAM Base Address i Bits 39-24
527 * This field defines the upper address bits of a 40-bit address
528 * that define the start of the DRAM region.
530 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
531 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
532 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
533 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
534 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
535 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
536 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
537 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
539 /* DRAM CS Base Address i Registers
548 * [ 0: 0] Chip-Select Bank Enable
552 * [15: 9] Base Address (19-13)
553 * An optimization used when all DIMM are the same size...
555 * [31:21] Base Address (35-25)
556 * This field defines the top 11 addresses bit of a 40-bit
557 * address that define the memory address space. These
558 * bits decode 32-MByte blocks of memory.
560 PCI_ADDR(0, 0x18, 2, 0x40), 0x001f01fe, 0x00000000,
561 PCI_ADDR(0, 0x18, 2, 0x44), 0x001f01fe, 0x00000000,
562 PCI_ADDR(0, 0x18, 2, 0x48), 0x001f01fe, 0x00000000,
563 PCI_ADDR(0, 0x18, 2, 0x4C), 0x001f01fe, 0x00000000,
564 PCI_ADDR(0, 0x18, 2, 0x50), 0x001f01fe, 0x00000000,
565 PCI_ADDR(0, 0x18, 2, 0x54), 0x001f01fe, 0x00000000,
566 PCI_ADDR(0, 0x18, 2, 0x58), 0x001f01fe, 0x00000000,
567 PCI_ADDR(0, 0x18, 2, 0x5C), 0x001f01fe, 0x00000000,
568 /* DRAM CS Mask Address i Registers
577 * Select bits to exclude from comparison with the DRAM Base address register.
579 * [15: 9] Address Mask (19-13)
580 * Address to be excluded from the optimized case
582 * [29:21] Address Mask (33-25)
583 * The bits with an address mask of 1 are excluded from address comparison
587 PCI_ADDR(0, 0x18, 2, 0x60), 0xC01f01ff, 0x00000000,
588 PCI_ADDR(0, 0x18, 2, 0x64), 0xC01f01ff, 0x00000000,
589 PCI_ADDR(0, 0x18, 2, 0x68), 0xC01f01ff, 0x00000000,
590 PCI_ADDR(0, 0x18, 2, 0x6C), 0xC01f01ff, 0x00000000,
591 PCI_ADDR(0, 0x18, 2, 0x70), 0xC01f01ff, 0x00000000,
592 PCI_ADDR(0, 0x18, 2, 0x74), 0xC01f01ff, 0x00000000,
593 PCI_ADDR(0, 0x18, 2, 0x78), 0xC01f01ff, 0x00000000,
594 PCI_ADDR(0, 0x18, 2, 0x7C), 0xC01f01ff, 0x00000000,
595 /* DRAM Bank Address Mapping Register
597 * Specify the memory module size
602 * 000 = 32Mbyte (Rows = 12 & Col = 8)
603 * 001 = 64Mbyte (Rows = 12 & Col = 9)
604 * 010 = 128Mbyte (Rows = 13 & Col = 9)|(Rows = 12 & Col = 10)
605 * 011 = 256Mbyte (Rows = 13 & Col = 10)|(Rows = 12 & Col = 11)
606 * 100 = 512Mbyte (Rows = 13 & Col = 11)|(Rows = 14 & Col = 10)
607 * 101 = 1Gbyte (Rows = 14 & Col = 11)|(Rows = 13 & Col = 12)
608 * 110 = 2Gbyte (Rows = 14 & Col = 12)
615 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff8888, 0x00000000,
616 /* DRAM Timing Low Register
618 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
628 * [ 7: 4] Trc (Row Cycle Time, Ras#-active to Ras#-active/bank auto refresh)
629 * 0000 = 7 bus clocks
630 * 0001 = 8 bus clocks
632 * 1110 = 21 bus clocks
633 * 1111 = 22 bus clocks
634 * [11: 8] Trfc (Row refresh Cycle time, Auto-refresh-active to RAS#-active or RAS#auto-refresh)
635 * 0000 = 9 bus clocks
636 * 0010 = 10 bus clocks
638 * 1110 = 23 bus clocks
639 * 1111 = 24 bus clocks
640 * [14:12] Trcd (Ras#-active to Case#-read/write Delay)
650 * [18:16] Trrd (Ras# to Ras# Delay)
660 * [23:20] Tras (Minimum Ras# Active Time)
661 * 0000 to 0100 = reserved
662 * 0101 = 5 bus clocks
664 * 1111 = 15 bus clocks
665 * [26:24] Trp (Row Precharge Time)
675 * [28:28] Twr (Write Recovery Time)
680 PCI_ADDR(0, 0x18, 2, 0x88), 0xe8088008, 0x02522001 /* 0x03623125 */ ,
681 /* DRAM Timing High Register
683 * [ 0: 0] Twtr (Write to Read Delay)
687 * [ 6: 4] Trwt (Read to Write Delay)
697 * [12: 8] Tref (Refresh Rate)
698 * 00000 = 100Mhz 4K rows
699 * 00001 = 133Mhz 4K rows
700 * 00010 = 166Mhz 4K rows
701 * 00011 = 200Mhz 4K rows
702 * 01000 = 100Mhz 8K/16K rows
703 * 01001 = 133Mhz 8K/16K rows
704 * 01010 = 166Mhz 8K/16K rows
705 * 01011 = 200Mhz 8K/16K rows
707 * [22:20] Twcl (Write CAS Latency)
708 * 000 = 1 Mem clock after CAS# (Unbuffered Dimms)
709 * 001 = 2 Mem clocks after CAS# (Registered Dimms)
712 PCI_ADDR(0, 0x18, 2, 0x8c), 0xff8fe08e, (0 << 20)|(0 << 8)|(0 << 4)|(0 << 0),
713 /* DRAM Config Low Register
715 * [ 0: 0] DLL Disable
724 * [ 3: 3] Disable DQS Hysteresis (FIXME handle this one carefully)
725 * 0 = Enable DQS input filter
726 * 1 = Disable DQS input filtering
729 * 0 = Initialization done or not yet started.
730 * 1 = Initiate DRAM initialization sequence
731 * [ 9: 9] SO-Dimm Enable
733 * 1 = SO-Dimms present
735 * 0 = DRAM not enabled
736 * 1 = DRAM initialized and enabled
737 * [11:11] Memory Clear Status
738 * 0 = Memory Clear function has not completed
739 * 1 = Memory Clear function has completed
740 * [12:12] Exit Self-Refresh
741 * 0 = Exit from self-refresh done or not yet started
742 * 1 = DRAM exiting from self refresh
743 * [13:13] Self-Refresh Status
744 * 0 = Normal Operation
745 * 1 = Self-refresh mode active
746 * [15:14] Read/Write Queue Bypass Count
751 * [16:16] 128-bit/64-Bit
752 * 0 = 64bit Interface to DRAM
753 * 1 = 128bit Interface to DRAM
754 * [17:17] DIMM ECC Enable
755 * 0 = Some DIMMs do not have ECC
756 * 1 = ALL DIMMS have ECC bits
757 * [18:18] UnBuffered DIMMs
759 * 1 = Unbuffered DIMMS
760 * [19:19] Enable 32-Byte Granularity
761 * 0 = Optimize for 64byte bursts
762 * 1 = Optimize for 32byte bursts
763 * [20:20] DIMM 0 is x4
764 * [21:21] DIMM 1 is x4
765 * [22:22] DIMM 2 is x4
766 * [23:23] DIMM 3 is x4
768 * 1 = x4 DIMM present
769 * [24:24] Disable DRAM Receivers
770 * 0 = Receivers enabled
771 * 1 = Receivers disabled
773 * 000 = Arbiters choice is always respected
774 * 001 = Oldest entry in DCQ can be bypassed 1 time
775 * 010 = Oldest entry in DCQ can be bypassed 2 times
776 * 011 = Oldest entry in DCQ can be bypassed 3 times
777 * 100 = Oldest entry in DCQ can be bypassed 4 times
778 * 101 = Oldest entry in DCQ can be bypassed 5 times
779 * 110 = Oldest entry in DCQ can be bypassed 6 times
780 * 111 = Oldest entry in DCQ can be bypassed 7 times
783 PCI_ADDR(0, 0x18, 2, 0x90), 0xf0000000,
785 (0 << 23)|(0 << 22)|(0 << 21)|(0 << 20)|
786 (1 << 19)|(0 << 18)|(1 << 17)|(0 << 16)|
787 (2 << 14)|(0 << 13)|(0 << 12)|
788 (0 << 11)|(0 << 10)|(0 << 9)|(0 << 8)|
789 (0 << 3) |(0 << 1) |(0 << 0),
790 /* DRAM Config High Register
792 * [ 0: 3] Maximum Asynchronous Latency
797 * [11: 8] Read Preamble
815 * [18:16] Idle Cycle Limit
824 * [19:19] Dynamic Idle Cycle Center Enable
825 * 0 = Use Idle Cycle Limit
826 * 1 = Generate a dynamic Idle cycle limit
827 * [22:20] DRAM MEMCLK Frequency
837 * [25:25] Memory Clock Ratio Valid (FIXME carefully enable memclk)
838 * 0 = Disable MemClks
840 * [26:26] Memory Clock 0 Enable
843 * [27:27] Memory Clock 1 Enable
846 * [28:28] Memory Clock 2 Enable
849 * [29:29] Memory Clock 3 Enable
854 PCI_ADDR(0, 0x18, 2, 0x94), 0xc180f0f0,
855 (0 << 29)|(0 << 28)|(0 << 27)|(0 << 26)|(0 << 25)|
856 (0 << 20)|(0 << 19)|(DCH_IDLE_LIMIT_16 << 16)|(0 << 8)|(0 << 0),
857 /* DRAM Delay Line Register
859 * Adjust the skew of the input DQS strobe relative to DATA
861 * [23:16] Delay Line Adjust
862 * Adjusts the DLL derived PDL delay by one or more delay stages
863 * in either the faster or slower direction.
864 * [24:24] Adjust Slower
866 * 1 = Adj is used to increase the PDL delay
867 * [25:25] Adjust Faster
869 * 1 = Adj is used to decrease the PDL delay
872 PCI_ADDR(0, 0x18, 2, 0x98), 0xfc00ffff, 0x00000000,
873 /* DRAM Scrub Control Register
875 * [ 4: 0] DRAM Scrub Rate
877 * [12: 8] L2 Scrub Rate
879 * [20:16] Dcache Scrub
882 * 00000 = Do not scrub
904 * All Others = Reserved
906 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
907 /* DRAM Scrub Address Low Register
909 * [ 0: 0] DRAM Scrubber Redirect Enable
911 * 1 = Scrubber Corrects errors found in normal operation
913 * [31: 6] DRAM Scrub Address 31-6
915 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
916 /* DRAM Scrub Address High Register
918 * [ 7: 0] DRAM Scrub Address 39-32
921 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
922 //BY LYH add IOMMU 64M APERTURE
923 PCI_ADDR(0, 0x18, 3, 0x94), 0xffff8000, 0x00000f70,
924 PCI_ADDR(0, 0x18, 3, 0x90), 0xffffff80, 0x00000002,
925 PCI_ADDR(0, 0x18, 3, 0x98), 0x0000000f, 0x00068300,
931 print_debug("setting up CPU");
932 print_debug_hex8(ctrl->node_id);
933 print_debug(" northbridge registers\r\n");
934 max = sizeof(register_values)/sizeof(register_values[0]);
935 for(i = 0; i < max; i += 3) {
940 print_debug_hex32(register_values[i]);
942 print_debug_hex32(register_values[i+2]);
/* Relocate the table's node-0 device address to this controller's
 * node: strip the register offset, rebase relative to ctrl->f0. */
945 dev = (register_values[i] & ~0xff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
946 where = register_values[i] & 0xff;
947 reg = pci_read_config32(dev, where);
948 reg &= register_values[i+1];
949 reg |= register_values[i+2];
950 pci_write_config32(dev, where, reg);
/* NOTE(review): a second, non-relocated write sequence follows; lines
 * are elided between the two in this view, so it is presumably an
 * alternative under a preprocessor conditional -- confirm. */
953 reg = pci_read_config32(register_values[i]);
954 reg &= register_values[i+1];
955 reg |= register_values[i+2];
956 pci_write_config32(register_values[i], reg);
959 print_debug("done.\r\n");
/* Return nonzero (the DCL_128BitEn bit) when the DRAM controller is
 * configured for the 128-bit dual-channel interface. */
963 static int is_dual_channel(const struct mem_controller *ctrl)
966 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
967 return dcl & DCL_128BitEn;
/* Return 1 when this node's northbridge reports 128-bit (dual channel)
 * capability, which this code uses as a proxy for "is an Opteron". */
970 static int is_opteron(const struct mem_controller *ctrl)
972 /* Test to see if I am an Opteron.
973 * FIXME Testing dual channel capability is correct for now
974 * but a better test is probably required.
977 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
978 return !!(nbcap & NBCAP_128Bit);
/* Return 1 when the controller is configured for registered SDRAM
 * (i.e. the UnBufDimm bit is clear in DRAM Config Low). */
981 static int is_registered(const struct mem_controller *ctrl)
983 /* Test to see if we are dealing with registered SDRAM.
984 * If we are not registered we are unbuffered.
985 * This function must be called after spd_handle_unbuffered_dimms.
988 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
989 return !(dcl & DCL_UnBufDimm);
/* spd_get_dimm_size() - read a DIMM's SPD EEPROM at SMBus address
 * `device` and return the log2 size in bits of each side.
 * side1 accumulates rows + columns + log2(logical banks) + log2(data
 * width); side2 is derived from side1 by swapping in the side-2 row
 * and column counts from the high nibbles of SPD bytes 3 and 4.
 * On any SPD read error the function bails to the (elided) `out`
 * label, returning whatever has been accumulated so far.
 */
997 static struct dimm_size spd_get_dimm_size(unsigned device)
999 /* Calculate the log base 2 size of a DIMM in bits */
1000 struct dimm_size sz;
1005 /* Note it might be easier to use byte 31 here, it has the DIMM size as
1006 * a multiple of 4MB. The way we do it now we can size both
1007 * sides of an asymmetric dimm.
1009 value = spd_read_byte(device, 3); /* rows */
1010 if (value < 0) goto out;
1011 sz.side1 += value & 0xf;
1013 value = spd_read_byte(device, 4); /* columns */
1014 if (value < 0) goto out;
1015 sz.side1 += value & 0xf;
1017 value = spd_read_byte(device, 17); /* banks */
1018 if (value < 0) goto out;
1019 sz.side1 += log2(value & 0xff);
1021 /* Get the module data width and convert it to a power of two */
1022 value = spd_read_byte(device, 7); /* (high byte) */
1023 if (value < 0) goto out;
1027 low = spd_read_byte(device, 6); /* (low byte) */
1028 if (low < 0) goto out;
1029 value = value | (low & 0xff);
1030 sz.side1 += log2(value);
1033 value = spd_read_byte(device, 5); /* number of physical banks */
1034 if (value <= 1) goto out;
1036 /* Start with the symmetrical case */
1037 sz.side2 = sz.side1;
1039 value = spd_read_byte(device, 3); /* rows */
1040 if (value < 0) goto out;
1041 if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
1042 sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
1043 sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */
1045 value = spd_read_byte(device, 4); /* columns */
1046 if (value < 0) goto out;
1047 sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
1048 sz.side2 += ((value >> 4) & 0x0f); /* Add in columns on side 2 */
/* set_dimm_size() - program the chip-select base registers and the
 * bank address map entry for DIMM `index` from the log2 bit sizes in
 * `sz`.  Each DIMM owns two consecutive CS base registers (one per
 * side); a side smaller than 32MB (log2 < 25+3 bits) leaves its base
 * register disabled.  Dual-channel doubles the decoded size.
 */
1054 static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
1056 uint32_t base0, base1, map;
1060 print_debug("set_dimm_size: (");
1061 print_debug_hex32(sz.side1);
1062 print_debug_char(',');
1063 print_debug_hex32(sz.side2);
1064 print_debug_char(',');
1065 print_debug_hex32(index);
1066 print_debug(")\r\n");
1068 if (sz.side1 != sz.side2) {
1071 map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
1072 map &= ~(0xf << (index + 4));
1074 /* For each base register.
1075 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
1076 * The initialize dimm size is in bits.
1077 * Set the base enable bit0.
1082 /* Make certain side1 of the dimm is at least 32MB */
1083 if (sz.side1 >= (25 +3)) {
/* 25+3: 32MB expressed in bits (2^25 bytes * 8); the excess log2
 * becomes the bank-map code and the CS size field at bit 21. */
1084 map |= (sz.side1 - (25 + 3)) << (index *4);
1085 base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
1087 /* Make certain side2 of the dimm is at least 32MB */
1088 if (sz.side2 >= (25 + 3)) {
1089 base1 = (1 << ((sz.side2 - (25 + 3)) + 21)) | 1;
1092 /* Double the size if we are using dual channel memory */
1093 if (is_dual_channel(ctrl)) {
1094 base0 = (base0 << 1) | (base0 & 1);
1095 base1 = (base1 << 1) | (base1 & 1);
1098 /* Clear the reserved bits */
1099 base0 &= ~0x001ffffe;
1100 base1 &= ~0x001ffffe;
1102 /* Set the appropriate DIMM base address register */
1103 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
1104 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
1105 pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
1107 /* Enable the memory clocks for this DIMM */
1109 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1110 dch |= DCH_MEMCLK_EN0 << index;
1111 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* spd_set_ram_size() - size every populated DIMM on channel 0 via SPD
 * and program its chip-select registers.  Stops at the first empty
 * slot (a zero SMBus address terminates the channel0[] list). */
1115 static void spd_set_ram_size(const struct mem_controller *ctrl)
1119 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1120 struct dimm_size sz;
1121 sz = spd_get_dimm_size(ctrl->channel0[i]);
1122 set_dimm_size(ctrl, sz, i);
/* fill_last() - write `base` into the DRAM Base registers of every
 * node number above node_id, on every northbridge function-1 device
 * in the system, so the yet-unprogrammed upper map entries start at
 * a sensible address. */
1125 static void fill_last(unsigned long node_id,unsigned long base)
1127 //BY LYH //Fill next base reg with right value
1132 for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device
1133 += PCI_DEV(0, 1, 0)) {
/* DRAM Base i lives at 0x40 + 8*i on function 1. */
1134 for(i=node_id+1;i<=7;i++) {
1135 base_reg=0x40+(i<<3);
1136 pci_write_config32(device,base_reg,base);
/* route_dram_accesses() - program the DRAM Base/Limit pair for this
 * node into EVERY northbridge in the system so all nodes route the
 * [base_k, limit_k) kilobyte range to ctrl->node_id.
 * The registers hold address bits 39-24 in their upper 16 bits, so
 * the kB values are shifted left 2 (kB -> 16MB-granular units). */
1142 static void route_dram_accesses(const struct mem_controller *ctrl,
1143 unsigned long base_k, unsigned long limit_k)
1145 /* Route the addresses to the controller node */
1150 unsigned limit_reg, base_reg;
1153 node_id = ctrl->node_id;
1154 index = (node_id << 3);
1155 limit = (limit_k << 2);
1156 limit &= 0xffff0000;
/* Limit registers are inclusive: back off one 16MB granule. */
1157 limit -= 0x00010000;
1158 limit |= ( 0 << 8) | (node_id << 0);
1159 base = (base_k << 2);
/* (1<<1)|(1<<0): write enable + read enable. */
1161 base |= (0 << 8) | (1<<1) | (1<<0);
1163 limit_reg = 0x44 + index;
1164 base_reg = 0x40 + index;
1165 for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
1166 pci_write_config32(device, limit_reg, limit);
1167 pci_write_config32(device, base_reg, base);
/* set_top_mem() - program the TOP_MEM/TOP_MEM2 MSRs from the total
 * memory size in kilobytes.  The kB count is split across msr.lo
 * (bits 21:0 -> byte address bits 31:10) and msr.hi (bits above).
 * NOTE(review): the body of the >= 0x003f0000 case is partly elided
 * here; presumably TOP_MEM is clamped below the 64M I/O hole before
 * the recomputation at the end -- confirm against the full source. */
1172 static void set_top_mem(unsigned tom_k)
1174 /* Error if I don't have memory */
1180 /* Report the amount of memory. */
1181 print_debug("RAM: 0x");
1182 print_debug_hex32(tom_k);
1183 print_debug(" KB\r\n");
1186 /* Now set top of memory */
1188 msr.lo = (tom_k & 0x003fffff) << 10;
1189 msr.hi = (tom_k & 0xffc00000) >> 22;
1190 wrmsr(TOP_MEM2, msr);
1192 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
1193 * so I can see my rom chip and other I/O devices.
1195 if (tom_k >= 0x003f0000) {
1198 msr.lo = (tom_k & 0x003fffff) << 10;
1199 msr.hi = (tom_k & 0xffc00000) >> 22;
1200 wrmsr(TOP_MEM, msr);
/* order_dimms() - lay the enabled chip selects out in memory from
 * largest to smallest above the previous node's limit, rewrite their
 * CS base/mask registers, then route the resulting range to this node
 * and seed the remaining map entries (fill_last).
 * tom tracks the running top of memory in 32MB units in its low 24
 * bits; the high 8 bits record which CS registers were already placed.
 */
1203 static void order_dimms(const struct mem_controller *ctrl)
1205 unsigned long tom, tom_k, base_k;
1208 /* Compute the memory base address address */
1210 for(node_id = 0; node_id < ctrl->node_id; node_id++) {
1211 uint32_t limit, base;
1213 index = node_id << 3;
1214 base = pci_read_config32(ctrl->f1, 0x40 + index);
1215 /* Only look at the limit if the base is enabled */
1216 if ((base & 3) == 3) {
1217 limit = pci_read_config32(ctrl->f1, 0x44 + index);
/* Convert the inclusive 16MB-granular limit back to kB. */
1218 base_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
1221 /* Remember which registers we have used in the high 8 bits of tom */
1224 /* Find the largest remaining canidate */
1225 unsigned index, canidate;
1226 uint32_t csbase, csmask;
1230 for(index = 0; index < 8; index++) {
1232 value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
1234 /* Is it enabled? */
1239 /* Is it greater? */
1240 if (value <= csbase) {
1244 /* Has it already been selected */
1245 if (tom & (1 << (index + 24))) {
1248 /* I have a new canidate */
1252 /* See if I have found a new canidate */
1257 /* Remember the dimm size */
1258 size = csbase >> 21;
1260 /* If this is the first chip select, round base_k to
1261 * be a multiple of it's size. Then set tom to equal
1263 * I assume that size is a power of two.
1265 if ((tom & 0xff000000) == 0) {
/* size is in 32MB units; << 15 converts to kB. */
1267 size_k = size << 15;
1268 base_k = (base_k + size_k -1) & ~(size_k -1);
1272 /* Remember I have used this register */
1273 tom |= (1 << (canidate + 24));
1275 /* Recompute the cs base register value */
1276 #if 1 // BY LYH Need to count from 0 for every memory controller
1277 csbase = ((tom - (base_k>>15))<< 21) | 1;
1278 // print_debug("csbase=");
1279 // print_debug_hex32(csbase);
1280 // print_debug("\r\n");
1282 csbase = (tom << 21) | 1;
1285 /* Increment the top of memory */
1288 /* Compute the memory mask */
1289 csmask = ((size -1) << 21);
1290 csmask |= 0xfe00; /* For now don't optimize */
1291 #warning "Don't forget to optimize the DIMM size"
1293 /* Write the new base register */
1294 pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
1295 /* Write the new mask register */
1296 pci_write_config32(ctrl->f2, DRAM_CSMASK + (canidate << 2), csmask);
1299 tom_k = (tom & ~0xff000000) << 15;
1301 print_debug("tom: ");
1302 print_debug_hex32(tom);
1303 print_debug(" base_k: ");
1304 print_debug_hex32(base_k);
1305 print_debug(" tom_k: ");
1306 print_debug_hex32(tom_k);
1307 print_debug("\r\n");
1309 route_dram_accesses(ctrl, base_k, tom_k);
1311 fill_last(ctrl->node_id, tom_k<<2);
1315 dump_pci_device(PCI_DEV(0, 0x18, 1));
1317 // if(ctrl->node_id==1) {
1318 // pci_write_config32(ctrl->f2, DRAM_CSBASE, 0x00000001);
/* disable_dimm() - clear both chip-select base registers belonging to
 * DIMM `index` (each DIMM owns two consecutive CS registers), which
 * disables the enable bit and removes the DIMM from the memory map. */
1325 static void disable_dimm(const struct mem_controller *ctrl, unsigned index)
1327 print_debug("disabling dimm");
1328 print_debug_hex8(index);
1329 print_debug("\r\n");
1330 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
1331 pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
/* spd_handle_unbuffered_dimms() - classify every populated DIMM as
 * registered or unbuffered from SPD byte 21 (bit 1 = registered),
 * reject unsupported mixes, and set/clear DCL_UnBufDimm accordingly.
 * DIMMs whose SPD cannot be read are disabled. */
1335 static void spd_handle_unbuffered_dimms(const struct mem_controller *ctrl)
1343 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1345 value = spd_read_byte(ctrl->channel0[i], 21);
1347 disable_dimm(ctrl, i);
1350 /* Registered dimm ? */
1351 if (value & (1 << 1)) {
1354 /* Otherwise it must be an unbuffered dimm */
1359 if (unbuffered && registered) {
1360 die("Mixed buffered and registered dimms not supported");
1362 if (unbuffered && is_opteron(ctrl)) {
1363 die("Unbuffered Dimms not supported on Opteron");
1366 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1367 dcl &= ~DCL_UnBufDimm;
1369 dcl |= DCL_UnBufDimm;
1371 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
1373 if (is_registered(ctrl)) {
1374 print_debug("Registered\r\n");
1376 print_debug("Unbuffered\r\n");
/* spd_enable_2channels() - switch the controller to the 128-bit dual
 * channel interface when the northbridge is capable and every
 * populated channel-0 DIMM has an SPD-identical partner on channel 1
 * (compared byte-for-byte over the timing/geometry bytes listed in
 * addresses[]).  Bails out early on any mismatch or read failure. */
1381 static void spd_enable_2channels(const struct mem_controller *ctrl)
1385 /* SPD addresses to verify are identical */
1386 #warning "FINISHME review and see if these are the bytes I need"
1387 /* FINISHME review and see if these are the bytes I need */
1388 static const unsigned addresses[] = {
1389 2, /* Type should be DDR SDRAM */
1390 3, /* *Row addresses */
1391 4, /* *Column addresses */
1392 5, /* *Physical Banks */
1393 6, /* *Module Data Width low */
1394 7, /* *Module Data Width high */
1395 9, /* *Cycle time at highest CAS Latency CL=X */
1396 11, /* *SDRAM Type */
1397 13, /* *SDRAM Width */
1398 17, /* *Logical Banks */
1399 18, /* *Supported CAS Latencies */
1400 21, /* *SDRAM Module Attributes */
1401 23, /* *Cycle time at CAS Latency (CLX - 0.5) */
1402 26, /* *Cycle time at CAS Latency (CLX - 1.0) */
1403 27, /* *tRP Row precharge time */
1404 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1405 29, /* *tRCD RAS to CAS */
1406 30, /* *tRAS Activate to Precharge */
1407 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1408 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1410 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1411 if (!(nbcap & NBCAP_128Bit)) {
1414 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1415 unsigned device0, device1;
1418 device0 = ctrl->channel0[i];
1419 device1 = ctrl->channel1[i];
1422 for(j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
1424 addr = addresses[j];
1425 value0 = spd_read_byte(device0, addr);
1429 value1 = spd_read_byte(device1, addr);
1433 if (value0 != value1) {
1438 print_debug("Enabling dual channel memory\r\n");
1440 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1441 dcl &= ~DCL_32ByteEn;
1442 dcl |= DCL_128BitEn;
1443 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* Fields of struct mem_param (the struct opening is elided in this listing):
 * one parameter set per supported memory clock, selected by get_mem_param(). */
1448 uint8_t divisor; /* In 1/2 ns increments */
1451 uint32_t dch_memclk; /* DCH_MEMCLK_* code, pre-shifted by DCH_MEMCLK_SHIFT */
1452 uint16_t dch_tref4k, dch_tref8k; /* DTH_TREF_* refresh codes for 4K/8K-row dimms */
/* get_mem_param - map the worst (largest) DIMM cycle time, expressed in the
 * SPD fixed-point format (e.g. 0x50 = 5.0ns), to the fastest supported
 * memory-clock parameter set that still satisfies it.  The speed[] table is
 * ordered from slowest (100MHz) to fastest (200MHz); the scan stops when the
 * next entry would require a shorter cycle time than min_cycle_time allows.
 * Dies if min_cycle_time is faster than every table entry. */
1457 static const struct mem_param *get_mem_param(unsigned min_cycle_time)
1459 static const struct mem_param speed[] = {
1461 .name = "100Mhz\r\n",
1463 .divisor = (10 <<1),
1466 .dch_memclk = DCH_MEMCLK_100MHZ << DCH_MEMCLK_SHIFT,
1467 .dch_tref4k = DTH_TREF_100MHZ_4K,
1468 .dch_tref8k = DTH_TREF_100MHZ_8K,
1472 .name = "133Mhz\r\n",
1474 .divisor = (7<<1)+1,
1477 .dch_memclk = DCH_MEMCLK_133MHZ << DCH_MEMCLK_SHIFT,
1478 .dch_tref4k = DTH_TREF_133MHZ_4K,
1479 .dch_tref8k = DTH_TREF_133MHZ_8K,
1483 .name = "166Mhz\r\n",
1488 .dch_memclk = DCH_MEMCLK_166MHZ << DCH_MEMCLK_SHIFT,
1489 .dch_tref4k = DTH_TREF_166MHZ_4K,
1490 .dch_tref8k = DTH_TREF_166MHZ_8K,
1494 .name = "200Mhz\r\n",
1499 .dch_memclk = DCH_MEMCLK_200MHZ << DCH_MEMCLK_SHIFT,
1500 .dch_tref4k = DTH_TREF_200MHZ_4K,
1501 .dch_tref8k = DTH_TREF_200MHZ_8K,
1508 const struct mem_param *param;
/* A terminating entry with cycle_time == 0 presumably ends the table
 * (elided from this listing) — the loop and the die() below rely on it. */
1509 for(param = &speed[0]; param->cycle_time ; param++) {
1510 if (min_cycle_time > (param+1)->cycle_time) {
/* BUGFIX: error message typo — "to low" corrected to "too low". */
1514 if (!param->cycle_time) {
1515 die("min_cycle_time too low");
1518 print_debug(param->name);
/* spd_set_memclk - pick the memory clock and CAS latency.
 * Pass 1: for each DIMM, read supported CAS latencies (SPD byte 18) and the
 * cycle times at CL, CL-0.5, CL-1.0 (bytes 9/23/26) to find the fastest
 * clock / lowest latency the controller and all DIMMs can share.
 * Pass 2: disable any DIMM that cannot run the selected memclk/latency.
 * Finally programs DRAM Config High (memclk) and DRAM Timing Low (Tcl) and
 * returns the matching mem_param set. */
1523 static const struct mem_param *spd_set_memclk(const struct mem_controller *ctrl)
1525 /* Compute the minimum cycle time for these dimms */
1526 const struct mem_param *param;
1527 unsigned min_cycle_time, min_latency;
/* SPD bytes holding cycle time at CL-1.0, CL-0.5, CL respectively. */
1531 static const int latency_indicies[] = { 26, 23, 9 };
/* Fastest cycle time the northbridge supports, indexed by NBCAP memclk field;
 * values are SPD fixed-point (0x50 = 5ns). */
1532 static const unsigned char min_cycle_times[] = {
1533 [NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
1534 [NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
1535 [NBCAP_MEMCLK_133MHZ] = 0x75, /* 7.5ns */
1536 [NBCAP_MEMCLK_100MHZ] = 0xa0, /* 10ns */
1540 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1541 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1545 print_debug("min_cycle_time: ");
1546 print_debug_hex8(min_cycle_time);
1547 print_debug(" min_latency: ");
1548 print_debug_hex8(min_latency);
1549 print_debug("\r\n");
1552 /* Compute the least latency with the fastest clock supported
1553 * by both the memory controller and the dimms.
1555 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1556 int new_cycle_time, new_latency;
1561 /* First find the supported CAS latencies
1562 * Byte 18 for DDR SDRAM is interpreted:
1563 * bit 0 == CAS Latency = 1.0
1564 * bit 1 == CAS Latency = 1.5
1565 * bit 2 == CAS Latency = 2.0
1566 * bit 3 == CAS Latency = 2.5
1567 * bit 4 == CAS Latency = 3.0
1568 * bit 5 == CAS Latency = 3.5
/* Start from the slowest candidate (10ns); NOTE(review): new_latency is
 * presumably initialized in an elided line near here — confirm. */
1572 new_cycle_time = 0xa0;
1575 latencies = spd_read_byte(ctrl->channel0[i], 18);
1576 if (latencies <= 0) continue;
1578 /* Compute the lowest cas latency supported */
1579 latency = log2(latencies) -2;
1581 /* Loop through and find a fast clock with a low latency */
1582 for(index = 0; index < 3; index++, latency++) {
/* Only whole/half latencies 2..4 are usable by this controller. */
1584 if ((latency < 2) || (latency > 4) ||
1585 (!(latencies & (1 << latency)))) {
1588 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1593 /* Only increase the latency if we decrease the clock */
1594 if ((value >= min_cycle_time) && (value < new_cycle_time)) {
1595 new_cycle_time = value;
1596 new_latency = latency;
1599 if (new_latency > 4){
1602 /* Does min_latency need to be increased? */
1603 if (new_cycle_time > min_cycle_time) {
1604 min_cycle_time = new_cycle_time;
1606 /* Does min_cycle_time need to be increased? */
1607 if (new_latency > min_latency) {
1608 min_latency = new_latency;
1612 print_debug_hex8(i);
1613 print_debug(" min_cycle_time: ");
1614 print_debug_hex8(min_cycle_time);
1615 print_debug(" min_latency: ");
1616 print_debug_hex8(min_latency);
1617 print_debug("\r\n");
1620 /* Make a second pass through the dimms and disable
1621 * any that cannot support the selected memclk and cas latency.
1624 for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
1630 latencies = spd_read_byte(ctrl->channel0[i], 18);
1631 if (latencies <= 0) {
1635 /* Compute the lowest cas latency supported */
1636 latency = log2(latencies) -2;
1638 /* Walk through searching for the selected latency */
1639 for(index = 0; index < 3; index++, latency++) {
1640 if (!(latencies & (1 << latency))) {
1643 if (latency == min_latency)
1646 /* If I can't find the latency or my index is bad error */
1647 if ((latency != min_latency) || (index >= 3)) {
1651 /* Read the min_cycle_time for this latency */
1652 value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
1654 /* All is good if the selected clock speed
1655 * is what I need or slower.
1657 if (value <= min_cycle_time) {
1660 /* Otherwise I have an error, disable the dimm */
1662 disable_dimm(ctrl, i);
1665 print_debug("min_cycle_time: ");
1666 print_debug_hex8(min_cycle_time);
1667 print_debug(" min_latency: ");
1668 print_debug_hex8(min_latency);
1669 print_debug("\r\n");
1671 /* Now that I know the minimum cycle time lookup the memory parameters */
1672 param = get_mem_param(min_cycle_time);
1674 /* Update DRAM Config High with our selected memory speed */
1675 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1676 value &= ~(DCH_MEMCLK_MASK << DCH_MEMCLK_SHIFT);
1677 value |= param->dch_memclk;
1678 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
/* Encode CL 2 / 2.5 / 3 (min_latency 2..4) into the Tcl field. */
1680 static const unsigned latencies[] = { DTL_CL_2, DTL_CL_2_5, DTL_CL_3 };
1681 /* Update DRAM Timing Low with our selected cas latency */
1682 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1683 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1684 value |= latencies[min_latency - 2] << DTL_TCL_SHIFT;
1685 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
/* update_dimm_Trc - program Trc for DIMM i.  SPD byte 41 is Trc in ns;
 * (value << 1) converts to half-ns so dividing by param->divisor (also in
 * half-ns) yields clocks, rounded up.  Clamps to [DTL_TRC_MIN, DTL_TRC_MAX],
 * never lowers an already-programmed value, and writes the field into
 * DRAM Timing Low.  Returns -1 on SPD read failure. */
1691 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1693 unsigned clocks, old_clocks;
1696 value = spd_read_byte(ctrl->channel0[i], 41);
1697 if (value < 0) return -1;
/* 0 and 0xff mean "undefined" in SPD; fallback handling is elided here. */
1698 if ((value == 0) || (value == 0xff)) {
1701 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1702 if (clocks < DTL_TRC_MIN) {
1703 clocks = DTL_TRC_MIN;
1705 if (clocks > DTL_TRC_MAX) {
/* Keep the larger of the current register value and our requirement. */
1709 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1710 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
1711 if (old_clocks > clocks) {
1712 clocks = old_clocks;
1714 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
1715 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
1716 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Trfc - program Trfc for DIMM i from SPD byte 42 (ns), falling
 * back to param->tRFC when the SPD value is undefined (0 or 0xff).  Converts
 * ns to clocks rounding up, clamps to [DTL_TRFC_MIN, DTL_TRFC_MAX], and never
 * lowers the value already in DRAM Timing Low.  Returns -1 on SPD failure. */
1720 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1722 unsigned clocks, old_clocks;
1725 value = spd_read_byte(ctrl->channel0[i], 42);
1726 if (value < 0) return -1;
1727 if ((value == 0) || (value == 0xff)) {
1728 value = param->tRFC;
/* value in ns, <<1 gives half-ns; divisor is in half-ns -> clocks. */
1730 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1731 if (clocks < DTL_TRFC_MIN) {
1732 clocks = DTL_TRFC_MIN;
1734 if (clocks > DTL_TRFC_MAX) {
/* Never relax a previously-programmed (stricter) Trfc. */
1737 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1738 old_clocks = ((dtl >> DTL_TRFC_SHIFT) & DTL_TRFC_MASK) + DTL_TRFC_BASE;
1739 if (old_clocks > clocks) {
1740 clocks = old_clocks;
1742 dtl &= ~(DTL_TRFC_MASK << DTL_TRFC_SHIFT);
1743 dtl |= ((clocks - DTL_TRFC_BASE) << DTL_TRFC_SHIFT);
1744 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Trcd - program Trcd for DIMM i from SPD byte 29 (quarter-ns
 * units; divisor<<1 converts the half-ns divisor to quarter-ns).  Clamps to
 * [DTL_TRCD_MIN, DTL_TRCD_MAX] and never lowers the programmed value.
 * NOTE(review): the two consecutive `clocks =` lines are presumably alternate
 * branches of a preprocessor conditional elided from this listing — confirm. */
1749 static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1751 unsigned clocks, old_clocks;
1754 value = spd_read_byte(ctrl->channel0[i], 29);
1755 if (value < 0) return -1;
1757 clocks = (value + (param->divisor << 1) -1)/(param->divisor << 1);
1759 clocks = (value + ((param->divisor & 0xff) << 1) -1)/((param->divisor & 0xff) << 1);
1761 if (clocks < DTL_TRCD_MIN) {
1762 clocks = DTL_TRCD_MIN;
1764 if (clocks > DTL_TRCD_MAX) {
/* Keep the stricter of the current register value and our requirement. */
1767 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1768 old_clocks = ((dtl >> DTL_TRCD_SHIFT) & DTL_TRCD_MASK) + DTL_TRCD_BASE;
1769 if (old_clocks > clocks) {
1770 clocks = old_clocks;
1772 dtl &= ~(DTL_TRCD_MASK << DTL_TRCD_SHIFT);
1773 dtl |= ((clocks - DTL_TRCD_BASE) << DTL_TRCD_SHIFT);
1774 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Trrd - program Trrd for DIMM i from SPD byte 28 (quarter-ns
 * units).  Rounds up to clocks, clamps to [DTL_TRRD_MIN, DTL_TRRD_MAX], and
 * never lowers the value already in DRAM Timing Low.  Returns -1 on SPD
 * read failure. */
1778 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1780 unsigned clocks, old_clocks;
1783 value = spd_read_byte(ctrl->channel0[i], 28);
1784 if (value < 0) return -1;
/* divisor is half-ns; <<1 converts to quarter-ns to match the SPD units. */
1785 clocks = (value + ((param->divisor & 0xff) << 1) -1)/((param->divisor & 0xff) << 1);
1786 if (clocks < DTL_TRRD_MIN) {
1787 clocks = DTL_TRRD_MIN;
1789 if (clocks > DTL_TRRD_MAX) {
1792 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1793 old_clocks = ((dtl >> DTL_TRRD_SHIFT) & DTL_TRRD_MASK) + DTL_TRRD_BASE;
1794 if (old_clocks > clocks) {
1795 clocks = old_clocks;
1797 dtl &= ~(DTL_TRRD_MASK << DTL_TRRD_SHIFT);
1798 dtl |= ((clocks - DTL_TRRD_BASE) << DTL_TRRD_SHIFT);
1799 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Tras - program Tras for DIMM i from SPD byte 30 (ns).
 * (value << 1) converts to half-ns to match param->divisor; rounds up,
 * clamps to [DTL_TRAS_MIN, DTL_TRAS_MAX], never lowers the programmed
 * value.  Returns -1 on SPD read failure. */
1803 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1805 unsigned clocks, old_clocks;
1808 value = spd_read_byte(ctrl->channel0[i], 30);
1809 if (value < 0) return -1;
1810 clocks = ((value << 1) + param->divisor - 1)/param->divisor;
1811 if (clocks < DTL_TRAS_MIN) {
1812 clocks = DTL_TRAS_MIN;
1814 if (clocks > DTL_TRAS_MAX) {
1817 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1818 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
1819 if (old_clocks > clocks) {
1820 clocks = old_clocks;
1822 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
1823 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
1824 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* update_dimm_Trp - program Trp for DIMM i from SPD byte 27 (quarter-ns
 * units; divisor<<1 converts the half-ns divisor to quarter-ns).  Clamps to
 * [DTL_TRP_MIN, DTL_TRP_MAX] and never lowers the programmed value.
 * NOTE(review): the two consecutive `clocks =` lines are presumably alternate
 * branches of a preprocessor conditional elided from this listing — confirm. */
1828 static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1830 unsigned clocks, old_clocks;
1833 value = spd_read_byte(ctrl->channel0[i], 27);
1834 if (value < 0) return -1;
1836 clocks = (value + (param->divisor << 1) - 1)/(param->divisor << 1);
1838 clocks = (value + ((param->divisor & 0xff) << 1) - 1)/((param->divisor & 0xff) << 1);
1841 print_debug("Trp: ");
1842 print_debug_hex8(clocks);
1843 print_debug(" spd value: ");
1844 print_debug_hex8(value);
1845 print_debug(" divisor: ");
1846 print_debug_hex8(param->divisor);
1847 print_debug("\r\n");
1849 if (clocks < DTL_TRP_MIN) {
1850 clocks = DTL_TRP_MIN;
1852 if (clocks > DTL_TRP_MAX) {
/* Keep the stricter of the current register value and our requirement. */
1855 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1856 old_clocks = ((dtl >> DTL_TRP_SHIFT) & DTL_TRP_MASK) + DTL_TRP_BASE;
1857 if (old_clocks > clocks) {
1858 clocks = old_clocks;
1860 dtl &= ~(DTL_TRP_MASK << DTL_TRP_SHIFT);
1861 dtl |= ((clocks - DTL_TRP_BASE) << DTL_TRP_SHIFT);
1862 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* set_Twr - program the write-recovery time field of DRAM Timing Low
 * directly from the selected clock's parameter set (param->dtl_twr). */
1866 static void set_Twr(const struct mem_controller *ctrl, const struct mem_param *param)
1869 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1870 dtl &= ~(DTL_TWR_MASK << DTL_TWR_SHIFT);
1871 dtl |= (param->dtl_twr - DTL_TWR_BASE) << DTL_TWR_SHIFT;
1872 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
/* init_Tref - seed the refresh-rate field of DRAM Timing High with the
 * 4K-row code for the selected clock; update_dimm_Tref() may later switch
 * it to the (slower) 8K-row code. */
1876 static void init_Tref(const struct mem_controller *ctrl, const struct mem_param *param)
1879 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1880 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1881 dth |= (param->dch_tref4k << DTH_TREF_SHIFT);
1882 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* update_dimm_Tref - choose 4K- vs 8K-row refresh for DIMM i from SPD byte 3
 * (row address bits) and merge with the currently-programmed rate: the 4K
 * code is kept only if this DIMM has 12 row bits AND the register already
 * holds the 4K code; otherwise fall back to the safer 8K code.
 * Returns -1 on SPD read failure. */
1885 static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1889 unsigned tref, old_tref;
1890 value = spd_read_byte(ctrl->channel0[i], 3);
1891 if (value < 0) return -1;
1894 tref = param->dch_tref8k;
1896 tref = param->dch_tref4k;
1899 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1900 old_tref = (dth >> DTH_TREF_SHIFT) & DTH_TREF_MASK;
1901 if ((value == 12) && (old_tref == param->dch_tref4k)) {
1902 tref = param->dch_tref4k;
1904 tref = param->dch_tref8k;
1906 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
1907 dth |= (tref << DTH_TREF_SHIFT);
1908 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* update_dimm_x4 - set or clear this DIMM's x4-device bit in DRAM Config Low
 * based on the SDRAM width from SPD byte 13.  NOTE(review): the `dimm`
 * computation preceding the += is elided from this listing — confirm it is
 * the per-slot bit index before DCL_x4DIMM_SHIFT is added. */
1913 static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1918 value = spd_read_byte(ctrl->channel0[i], 13);
1923 dimm += DCL_x4DIMM_SHIFT;
1924 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1925 dcl &= ~(1 << dimm);
1929 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* update_dimm_ecc - if this DIMM lacks ECC (SPD byte 11, module error-check
 * type), clear DCL_DimmEccEn so the controller does not enable ECC for a
 * mixed population.  The exact guard condition is elided from this listing. */
1933 static int update_dimm_ecc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
1937 value = spd_read_byte(ctrl->channel0[i], 11);
1942 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
1943 dcl &= ~DCL_DimmEccEn;
1944 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
/* count_dimms - count populated DIMMs by scanning the even chip-select base
 * registers (one DIMM spans two chip selects, so step index by 2; each
 * CSBASE register is 4 bytes, so the config offset is DRAM_CSBASE + index*4). */
1949 static int count_dimms(const struct mem_controller *ctrl)
1954 for(index = 0; index < 8; index += 2) {
/* BUGFIX: '+' binds tighter than '<<', so the old expression
 * (DRAM_CSBASE + index << 2) evaluated as (DRAM_CSBASE + index) << 2 and
 * read the wrong config offset.  Parenthesize the shift so we address
 * DRAM_CSBASE + index*4, matching the CS addressing in disable_dimm(). */
1956 csbase = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
/* set_Twtr - program the write-to-read delay in DRAM Timing High; AMD
 * specifies a fixed value of 1 clock regardless of the parameter set. */
1964 static void set_Twtr(const struct mem_controller *ctrl, const struct mem_param *param)
1968 clocks = 1; /* AMD says hard code this */
1969 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
1970 dth &= ~(DTH_TWTR_MASK << DTH_TWTR_SHIFT);
1971 dth |= ((clocks - DTH_TWTR_BASE) << DTH_TWTR_SHIFT);
1972 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* set_Trwt - program the read-to-write turnaround in DRAM Timing High.
 * The clock count is chosen from a table keyed on CPU flavor
 * (Opteron vs Athlon64), module type (registered vs unbuffered), the
 * programmed CAS latency, and the memory-clock divisor; the actual
 * `clocks = N` assignments are elided from this listing.  Dies if the
 * resulting value falls outside [DTH_TRWT_MIN, DTH_TRWT_MAX]. */
1975 static void set_Trwt(const struct mem_controller *ctrl, const struct mem_param *param)
1983 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1984 latency = (dtl >> DTL_TCL_SHIFT) & DTL_TCL_MASK;
1985 divisor = param->divisor;
1987 if (is_opteron(ctrl)) {
1988 if (latency == DTL_CL_2) {
/* divisor comparisons select 200MHz ((6<<0)) vs slower clocks. */
1989 if (divisor == ((6 << 0) + 0)) {
1993 else if (divisor > ((6 << 0)+0)) {
1994 /* 100Mhz && 133Mhz */
1998 else if (latency == DTL_CL_2_5) {
2001 else if (latency == DTL_CL_3) {
2002 if (divisor == ((6 << 0)+0)) {
2006 else if (divisor > ((6 << 0)+0)) {
2007 /* 100Mhz && 133Mhz */
2012 else /* Athlon64 */ {
2013 if (is_registered(ctrl)) {
2014 if (latency == DTL_CL_2) {
2017 else if (latency == DTL_CL_2_5) {
2020 else if (latency == DTL_CL_3) {
2024 else /* Unbuffered */{
2025 if (latency == DTL_CL_2) {
2028 else if (latency == DTL_CL_2_5) {
2031 else if (latency == DTL_CL_3) {
/* Refuse to program a value outside the field's legal range. */
2036 if ((clocks < DTH_TRWT_MIN) || (clocks > DTH_TRWT_MAX)) {
2037 die("Unknown Trwt");
2040 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
2041 dth &= ~(DTH_TRWT_MASK << DTH_TRWT_SHIFT);
2042 dth |= ((clocks - DTH_TRWT_BASE) << DTH_TRWT_SHIFT);
2043 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* set_Twcl - program the write CAS latency (memory clocks after CAS#) in
 * DRAM Timing High; the clock value depends on registered vs unbuffered
 * modules (the assignments themselves are elided from this listing). */
2047 static void set_Twcl(const struct mem_controller *ctrl, const struct mem_param *param)
2049 /* Memory Clocks after CAS# */
2052 if (is_registered(ctrl)) {
2057 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
2058 dth &= ~(DTH_TWCL_MASK << DTH_TWCL_SHIFT);
2059 dth |= ((clocks - DTH_TWCL_BASE) << DTH_TWCL_SHIFT);
2060 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
/* set_read_preamble - program the read-preamble field of DRAM Config High.
 * The preamble (in half-ns encoding, e.g. (9<<1)+0 = 9.0ns) is selected by
 * memory-clock divisor; for registered modules a single table applies, for
 * unbuffered modules the choice additionally depends on how many DIMM slots
 * are populated (the per-slot branches are partially elided here).  Dies if
 * the value falls outside [DCH_RDPREAMBLE_MIN, DCH_RDPREAMBLE_MAX]. */
2064 static void set_read_preamble(const struct mem_controller *ctrl, const struct mem_param *param)
2068 unsigned rdpreamble;
2069 divisor = param->divisor;
2070 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2071 dch &= ~(DCH_RDPREAMBLE_MASK << DCH_RDPREAMBLE_SHIFT);
2073 if (is_registered(ctrl)) {
/* divisor (10<<1)=100MHz, (7<<1)+1=133MHz, (6<<1)=166MHz. */
2074 if (divisor == ((10 << 1)+0)) {
2076 rdpreamble = ((9 << 1)+ 0);
2078 else if (divisor == ((7 << 1)+1)) {
2080 rdpreamble = ((8 << 1)+0);
2082 else if (divisor == ((6 << 1)+0)) {
2084 rdpreamble = ((7 << 1)+1);
/* Unbuffered: count populated slots first (loop body partly elided). */
2091 for(i = 0; i < 4; i++) {
2092 if (ctrl->channel0[i]) {
2096 if (divisor == ((10 << 1)+0)) {
2100 rdpreamble = ((9 << 1)+0);
2103 rdpreamble = ((14 << 1)+0);
2106 else if (divisor == ((7 << 1)+1)) {
2110 rdpreamble = ((7 << 1)+0);
2113 rdpreamble = ((11 << 1)+0);
2116 else if (divisor == ((6 << 1)+0)) {
2120 rdpreamble = ((7 << 1)+0);
2123 rdpreamble = ((9 << 1)+0);
2126 else if (divisor == ((5 << 1)+0)) {
2130 rdpreamble = ((5 << 1)+0);
2133 rdpreamble = ((7 << 1)+0);
2137 if ((rdpreamble < DCH_RDPREAMBLE_MIN) || (rdpreamble > DCH_RDPREAMBLE_MAX)) {
2138 die("Unknown rdpreamble");
2140 dch |= (rdpreamble - DCH_RDPREAMBLE_BASE) << DCH_RDPREAMBLE_SHIFT;
2141 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* set_max_async_latency - program the maximum asynchronous latency field of
 * DRAM Config High.  The latency depends on module type and, for unbuffered
 * modules, on the DIMM count (per-count assignments are elided from this
 * listing); more than the supported number of unbuffered DIMMs is fatal. */
2144 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2151 dimms = count_dimms(ctrl);
2153 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2154 dch &= ~(DCH_ASYNC_LAT_MASK << DCH_ASYNC_LAT_SHIFT);
2156 if (is_registered(ctrl)) {
2168 die("Too many unbuffered dimms");
2170 else if (dimms == 3) {
2179 dch |= ((async_lat - DCH_ASYNC_LAT_BASE) << DCH_ASYNC_LAT_SHIFT);
2180 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* set_idle_cycle_limit - hardcode the dynamic idle-cycle limiter per AMD
 * guidance: limit of 16 cycles plus the dynamic idle counter enable bit
 * in DRAM Config High. */
2183 static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
2186 /* AMD says to Hardcode this */
2187 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2188 dch &= ~(DCH_IDLE_LIMIT_MASK << DCH_IDLE_LIMIT_SHIFT);
2189 dch |= DCH_IDLE_LIMIT_16 << DCH_IDLE_LIMIT_SHIFT;
2190 dch |= DCH_DYN_IDLE_CTR_EN;
2191 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
/* spd_set_dram_timing - apply all per-DIMM and global timing settings for
 * the selected mem_param.  For each populated DIMM, programs every SPD-
 * derived field; any SPD read failure branches to an elided dimm_err path
 * that disables the offending DIMM.  Then programs the DIMM-independent
 * fields of DRAM Timing Low/High and DRAM Config High. */
2194 static void spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param)
2198 init_Tref(ctrl, param);
2199 for(i = 0; (i < 4) && ctrl->channel0[i]; i++) {
2201 /* DRAM Timing Low Register */
2202 if (update_dimm_Trc (ctrl, param, i) < 0) goto dimm_err;
2203 if (update_dimm_Trfc(ctrl, param, i) < 0) goto dimm_err;
2204 if (update_dimm_Trcd(ctrl, param, i) < 0) goto dimm_err;
2205 if (update_dimm_Trrd(ctrl, param, i) < 0) goto dimm_err;
2206 if (update_dimm_Tras(ctrl, param, i) < 0) goto dimm_err;
2207 if (update_dimm_Trp (ctrl, param, i) < 0) goto dimm_err;
2209 /* DRAM Timing High Register */
2210 if (update_dimm_Tref(ctrl, param, i) < 0) goto dimm_err;
2212 /* DRAM Config Low */
2213 if (update_dimm_x4 (ctrl, param, i) < 0) goto dimm_err;
2214 if (update_dimm_ecc(ctrl, param, i) < 0) goto dimm_err;
/* dimm_err: a failed SPD read anywhere above disables this DIMM
 * (label itself elided from this listing). */
2217 disable_dimm(ctrl, i);
2220 /* DRAM Timing Low Register */
2221 set_Twr(ctrl, param);
2223 /* DRAM Timing High Register */
2224 set_Twtr(ctrl, param);
2225 set_Trwt(ctrl, param);
2226 set_Twcl(ctrl, param);
2228 /* DRAM Config High */
2229 set_read_preamble(ctrl, param);
2230 set_max_async_latency(ctrl, param);
2231 set_idle_cycle_limit(ctrl, param);
/* sdram_set_spd_registers - top-level SPD-driven configuration for one
 * memory controller: enable dual channel if possible, size the chip
 * selects, handle registered/unbuffered detection, pick the memory clock,
 * then program all timing registers for the chosen parameter set. */
2234 static void sdram_set_spd_registers(const struct mem_controller *ctrl)
2236 const struct mem_param *param;
2237 spd_enable_2channels(ctrl);
2238 spd_set_ram_size(ctrl);
2239 spd_handle_unbuffered_dimms(ctrl);
2240 param = spd_set_memclk(ctrl);
2241 spd_set_dram_timing(ctrl, param);
2245 #define TIMEOUT_LOOPS 300000
2246 static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2250 /* Before enabling memory start the memory clocks */
2251 for(i = 0; i < controllers; i++) {
2253 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
2254 dch |= DCH_MEMCLK_VALID;
2255 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
2258 /* And if necessary toggle the the reset on the dimms by hand */
2259 memreset(controllers, ctrl);
2261 for(i = 0; i < controllers; i++) {
2263 /* Toggle DisDqsHys to get it working */
2264 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2266 print_debug("dcl: ");
2267 print_debug_hex32(dcl);
2268 print_debug("\r\n");
2271 dcl &= ~DCL_DimmEccEn;
2273 #warning "FIXME set the ECC type to perform"
2274 #warning "FIXME initialize the scrub registers"
2276 if (dcl & DCL_DimmEccEn) {
2277 print_debug("ECC enabled\r\n");
2280 dcl |= DCL_DisDqsHys;
2281 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2282 dcl &= ~DCL_DisDqsHys;
2283 dcl &= ~DCL_DLL_Disable;
2286 dcl |= DCL_DramInit;
2287 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2290 for(i = 0; i < controllers; i++) {
2292 print_debug("Initializing memory: ");
2295 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2297 if ((loops & 1023) == 0) {
2300 } while(((dcl & DCL_DramInit) != 0) && (loops < TIMEOUT_LOOPS));
2301 if (loops >= TIMEOUT_LOOPS) {
2302 print_debug(" failed\r\n");
2304 print_debug(" done\r\n");
2307 if (dcl & DCL_DimmEccEn) {
2308 print_debug("Clearing memory: ");
2310 dcl &= ~DCL_MemClrStatus;
2311 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
2314 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
2316 if ((loops & 1023) == 0) {
2318 print_debug_hex32(loops);
2320 } while(((dcl & DCL_MemClrStatus) == 0) && (loops < TIMEOUT_LOOPS));
2321 if (loops >= TIMEOUT_LOOPS) {
2322 print_debug("failed\r\n");
2324 print_debug("done\r\n");
2326 pci_write_config32(ctrl[i].f3, SCRUB_ADDR_LOW, 0);
2327 pci_write_config32(ctrl[i].f3, SCRUB_ADDR_HIGH, 0);