2 * This file is part of the coreboot project.
4 * Copyright (C) 2008 Arastra, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 /* This code is based on src/northbridge/intel/e7520/raminit.c */
23 #include <cpu/x86/mem.h>
24 #include <cpu/x86/mtrr.h>
25 #include <cpu/x86/cache.h>
29 /* DDR2 memory controller register space */
30 #define MCBAR 0x90000000
/*
 * sdram_set_registers() - apply the static northbridge register table.
 *
 * register_values[] is consumed three entries at a time:
 *   [i]   PCI address (PCI_ADDR) of the register,
 *   [i+1] AND mask - bits to preserve,
 *   [i+2] OR mask  - bits to set.
 *
 * NOTE(review): this excerpt is elided; the function's opening brace,
 * the local declarations (dev, where, reg, i, max) and the table's
 * closing "};" are not visible here.
 */
static void sdram_set_registers(const struct mem_controller *ctrl)
static const u32 register_values[] = {
	/* CKDIS 0x8c disable clocks */
	PCI_ADDR(0, 0x00, 0, CKDIS), 0xffff0000, 0x0000ffff,
	/* 0x9c Device present and extended RAM control.
	 * DEVPRES is very touchy, hard code the initialization
	 * of PCI-E ports here. */
	PCI_ADDR(0, 0x00, 0, DEVPRES), 0x00000000, 0x07020801 | DEVPRES_CONFIG,
	/* 0xc8 Remap RAM base and limit off */
	PCI_ADDR(0, 0x00, 0, REMAPLIMIT), 0x00000000, 0x03df0000,
	/* raw-offset registers 0xd8/0xe8 - meaning not visible here,
	 * values presumably taken from the factory BIOS; verify */
	PCI_ADDR(0, 0x00, 0, 0xd8), 0x00000000, 0xb5930000,
	PCI_ADDR(0, 0x00, 0, 0xe8), 0x00000000, 0x00004a2a,
	PCI_ADDR(0, 0x00, 0, MCHCFG0), 0xfce0ffff, 0x00006000, /* 6000 */
	/* PAM legacy-range attribute registers (presumably opening the
	 * C0000-FFFFF segments - verify against the chipset datasheet) */
	PCI_ADDR(0, 0x00, 0, PAM-1), 0xcccccc7f, 0x33333000,
	PCI_ADDR(0, 0x00, 0, PAM+3), 0xcccccccc, 0x33333333,
	PCI_ADDR(0, 0x00, 0, DEVPRES1), 0xffbffff, (1<<22)|(6<<2) | DEVPRES1_CONFIG,
	/* IURBASE: point the internal register window at MCBAR */
	PCI_ADDR(0, 0x00, 0, IURBASE), 0x00000fff, MCBAR |0,

/* Walk the table: read-modify-write each listed register */
max = sizeof(register_values)/sizeof(register_values[0]);
for(i = 0; i < max; i += 3) {
	/* Rebase the table's 0:0.0 address onto this controller's f0 */
	dev = (register_values[i] & ~0xff) - PCI_DEV(0, 0x00, 0) + ctrl->f0;
	where = register_values[i] & 0xff;
	reg = pci_read_config32(dev, where);
	reg &= register_values[i+1];
	reg |= register_values[i+2];
	pci_write_config32(dev, where, reg);
print_spew("done.\r\n");
/*
 * spd_get_dimm_size() - calculate the log base 2 size of a DIMM in bits.
 *
 * Sums the log2 contributions of rows, columns, banks and module width
 * from the SPD on @device into sz.side1, then derives sz.side2 for
 * double-sided modules from the high nibbles of the same bytes.
 *
 * NOTE(review): elided excerpt - the declarations of sz/value/low and
 * the out:/val_err:/hw_err: labels are only partly visible here.
 */
static struct dimm_size spd_get_dimm_size(u16 device)
	/* Calculate the log base 2 size of a DIMM in bits */
	/* Note it might be easier to use byte 31 here, it has the DIMM size
	 * as a multiple of 4MB. The way we do it now we can size both
	 * sides of an asymmetric dimm. */
	value = spd_read_byte(device, 3); /* rows */
	if (value < 0) goto hw_err;
	if ((value & 0xf) == 0) goto val_err;
	sz.side1 += value & 0xf;

	value = spd_read_byte(device, 4); /* columns */
	if (value < 0) goto hw_err;
	if ((value & 0xf) == 0) goto val_err;
	sz.side1 += value & 0xf;

	value = spd_read_byte(device, 17); /* banks */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;
	sz.side1 += log2(value & 0xff);

	/* Get the module data width and convert it to a power of two */
	value = spd_read_byte(device, 7); /* (high byte) */
	if (value < 0) goto hw_err;
	low = spd_read_byte(device, 6); /* (low byte) */
	if (low < 0) goto hw_err;
	value = value | (low & 0xff);
	/* only 64-bit and 72-bit (ECC) wide modules are accepted */
	if ((value != 72) && (value != 64)) goto val_err;
	sz.side1 += log2(value);

	value = spd_read_byte(device, 5); /* number of physical banks */
	if (value < 0) goto hw_err;
	if (value == 1) goto out; /* single-sided: done */
	if (value != 2) goto val_err;

	/* Start with the symmetrical case */
	value = spd_read_byte(device, 3); /* rows */
	if (value < 0) goto hw_err;
	if ((value & 0xf0) == 0) goto out; /* If symmetrical we are done */
	sz.side2 -= (value & 0x0f); /* Subtract out rows on side 1 */
	sz.side2 += ((value >> 4) & 0x0f); /* Add in rows on side 2 */

	value = spd_read_byte(device, 4); /* columns */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;
	sz.side2 -= (value & 0x0f); /* Subtract out columns on side 1 */
	sz.side2 += ((value >> 4) & 0x0f); /* Add in columns on side 2 */

	die("Bad SPD value\r\n");
	/* If an hw_error occurs report that I have no memory */
/*
 * spd_set_ram_size() - program the DRB row-boundary registers.
 *
 * Accumulates each present DIMM's size (converted to 64MB units) into a
 * running total and writes the cumulative boundary for both sides of
 * every socket, then programs TOM and TOLM.  Returns -1 on SPD error.
 *
 * NOTE(review): elided excerpt - locals (i, cum, sz), the error guard
 * before "return -1", the side2/empty-slot branch structure and several
 * closing braces are not visible here.
 */
static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
	for(i = cum = 0; i < DIMM_SOCKETS; i++) {
		if (dimm_mask & (1 << i)) {
			sz = spd_get_dimm_size(ctrl->channel0[i]);
			/* NOTE(review): failure guard elided above */
			return -1; /* Report SPD error */
			/* convert bits to multiples of 64MB */
			cum += (1 << sz.side1);
			pci_write_config8(ctrl->f0, DRB + (i*2), cum);
			/* double-sided: add side 2 before the odd boundary */
			cum += (1 << sz.side2);
			pci_write_config8(ctrl->f0, DRB+1 + (i*2), cum);
			/* empty/single side: repeat the current boundary */
			pci_write_config8(ctrl->f0, DRB + (i*2), cum);
			pci_write_config8(ctrl->f0, DRB+1 + (i*2), cum);
	/* set TOM top of memory 0xcc */
	pci_write_config16(ctrl->f0, TOM, cum);
	/* set TOLM top of low memory */
	pci_write_config16(ctrl->f0, TOLM, cum);
/*
 * spd_detect_dimms() - probe both channels for present DIMMs.
 *
 * Reads SPD byte 2 (memory type) from each socket; a successful read
 * (type check elided in this view) sets the socket's bit in dimm_mask:
 * channel 0 in bits [DIMM_SOCKETS-1:0], channel 1 in the next
 * DIMM_SOCKETS bits.  Returns the mask.
 */
static u32 spd_detect_dimms(const struct mem_controller *ctrl)
	for(i = 0; i < DIMM_SOCKETS; i++) {
		device = ctrl->channel0[i];
		byte = spd_read_byte(device, 2); /* Type */
		dimm_mask |= (1 << i);
		device = ctrl->channel1[i];
		byte = spd_read_byte(device, 2);
		dimm_mask |= (1 << (i + DIMM_SOCKETS));
/*
 * spd_set_row_attributes() - compute and program the DRA register.
 *
 * For each present DIMM on channel 0, accumulates a page-size code
 * ('reg') from the SPD row/column/bank/width bytes; the per-socket
 * nibble is packed into 'dra' (low nibble side 1, high nibble side 2
 * for double-sided modules) and written to DRA.
 *
 * NOTE(review): elided excerpt - locals, the 'reg' initialization and
 * accumulation, the loop braces and label bodies are only partly
 * visible here.
 */
static int spd_set_row_attributes(const struct mem_controller *ctrl,
	for(cnt=0; cnt < 4; cnt++) {
		if (!(dimm_mask & (1 << cnt))) {
		value = spd_read_byte(ctrl->channel0[cnt], 3); /* rows */
		if (value < 0) goto hw_err;
		if ((value & 0xf) == 0) goto val_err;
		value = spd_read_byte(ctrl->channel0[cnt], 4); /* columns */
		if (value < 0) goto hw_err;
		if ((value & 0xf) == 0) goto val_err;
		value = spd_read_byte(ctrl->channel0[cnt], 17); /* banks */
		if (value < 0) goto hw_err;
		if ((value & 0xff) == 0) goto val_err;
		reg += log2(value & 0xff);
		/* Get the device width and convert it to a power of two */
		value = spd_read_byte(ctrl->channel0[cnt], 13);
		if (value < 0) goto hw_err;
		value = log2(value & 0xff);
		/* sanity: anything below 2^27 bits is not a valid page code */
		if(reg < 27) goto hw_err;
		dra += reg << (cnt*8);
		/* SPD byte 5: physical banks - double-sided modules get a
		 * second nibble (branch elided in this view) */
		value = spd_read_byte(ctrl->channel0[cnt], 5);
		dra += reg << ((cnt*8)+4);
	pci_write_config32(ctrl->f0, DRA, dra);
	die("Bad SPD value\r\n");
	/* If an hw_error occurs report that I have no memory */
/*
 * spd_set_drt_attributes() - derive DRAM timing values and program DRT.
 *
 * Picks the lowest CAS latency supported by every populated DIMM at the
 * selected cycle time, gathers the worst-case (largest) SPD timing
 * bytes into the packed accumulators 'index'/'index2' (one byte per
 * parameter), then encodes the DRT bit-fields for the active clock
 * (cycle_time 0x75/0x60/0x50 = 133/167/200 MHz).  Returns the CAS
 * latency scaled by 10 (20, 25 or 30).
 *
 * NOTE(review): heavily elided excerpt - locals, many braces and the
 * else-arms of the timing comparisons are not visible here.
 */
static int spd_set_drt_attributes(const struct mem_controller *ctrl,
	long dimm_mask, u32 drc)
	/* supported SPD cycle times: 7.5ns, 6.0ns, 5.0ns */
	static const u8 cycle_time[3] = { 0x75, 0x60, 0x50 };
	/* SPD bytes holding tCK at CL(X), CL(X-1), CL(X-2) */
	static const u8 latency_indicies[] = { 26, 23, 9 };

	drt = pci_read_config32(ctrl->f0, DRT);
	drt &= 3; /* save bits 1:0 */

	/* locate the first populated socket */
	for(first_dimm = 0; first_dimm < 4; first_dimm++) {
		if (dimm_mask & (1 << first_dimm))

	drt |= (1<<6); /* back to back write turn around */
	drt |= (3<<18); /* Trasmax */

	/* find the lowest CAS latency usable by every DIMM at this clock */
	for(cnt=0; cnt < 4; cnt++) {
		if (!(dimm_mask & (1 << cnt))) {
		reg = spd_read_byte(ctrl->channel0[cnt], 18); /* CAS Latency */
		/* Compute the lowest cas latency supported */
		latency = log2(reg) -2;
		/* Loop through and find a fast clock with a low latency */
		for(index = 0; index < 3; index++, latency++) {
			if ((latency < 2) || (latency > 4) ||
				(!(reg & (1 << latency)))) {
			value = spd_read_byte(ctrl->channel0[cnt],
				latency_indicies[index]);
			if(value <= cycle_time[drc&3]) {
				if( latency > cas_latency) {
					cas_latency = latency;

	/* encode CAS latency as 20/25/30 (x10 fixed point) */
	index = (cas_latency-2);
	if((index)==0) cas_latency = 20;
	else if((index)==1) cas_latency = 25;
	else cas_latency = 30;

	/* accumulate worst-case timing bytes across all DIMMs
	 * (SPD 27=tRP, 28=tRRD, 29=tRCD, 41=tRC, 42=tRFC per JEDEC SPD
	 * - verify against the SPD revision in use) */
	for(cnt=0;cnt<4;cnt++) {
		if (!(dimm_mask & (1 << cnt))) {
		reg = spd_read_byte(ctrl->channel0[cnt], 27)&0x0ff;
		if(((index>>8)&0x0ff)<reg) {
			index &= ~(0x0ff << 8);
		reg = spd_read_byte(ctrl->channel0[cnt], 28)&0x0ff;
		if(((index>>16)&0x0ff)<reg) {
			index &= ~(0x0ff << 16);
		reg = spd_read_byte(ctrl->channel0[cnt], 29)&0x0ff;
		if(((index2>>0)&0x0ff)<reg) {
			index2 &= ~(0x0ff << 0);
		reg = spd_read_byte(ctrl->channel0[cnt], 41)&0x0ff;
		if(((index2>>8)&0x0ff)<reg) {
			index2 &= ~(0x0ff << 8);
		reg = spd_read_byte(ctrl->channel0[cnt], 42)&0x0ff;
		if(((index2>>16)&0x0ff)<reg) {
			index2 &= ~(0x0ff << 16);

	/* encode the DRT fields for the selected clock */
	value = cycle_time[drc&3];
	if(value <= 0x50) { /* 200 MHz */
		drt |= (2<<2); /* CAS latency 4 */
		drt |= (1<<2); /* CAS latency 3 */
		if((index&0x0ff00)<=0x03c00) {
			drt |= (1<<8); /* Trp RAS Precharg */
			drt |= (2<<8); /* Trp RAS Precharg */
		/* Trcd RAS to CAS delay */
		if((index2&0x0ff)<=0x03c) {
		/* Tdal Write auto precharge recovery delay */
		if((index2&0x0ff00)<=0x03700)
		else if((index2&0xff00)<=0x03c00)
			drt |= (2<<14); /* spd 41 */
		drt |= (2<<16); /* Twr not defined for DDR docs say use 2 */
		if((index&0x0ff0000)<=0x0140000) {
		} else if((index&0x0ff0000)<=0x0280000) {
		} else if((index&0x0ff0000)<=0x03c0000) {
		/* Trfc Auto refresh cycle time */
		if((index2&0x0ff0000)<=0x04b0000) {
		} else if((index2&0x0ff0000)<=0x0690000) {
		/* Docs say use 55 for all 200Mhz */
	else if(value <= 0x60) { /* 167 Mhz */
		/* according to new documentation CAS latency is 00
		 * for bits 3:2 for all 167 Mhz
		drt |= ((index&3)<<2); */ /* set CAS latency */
		if((index&0x0ff00)<=0x03000) {
			drt |= (1<<8); /* Trp RAS Precharg */
			drt |= (2<<8); /* Trp RAS Precharg */
		/* Trcd RAS to CAS delay */
		if((index2&0x0ff)<=0x030) {
		/* Tdal Write auto precharge recovery delay */
		drt |= (2<<14); /* spd 41, but only one choice */
		drt |= (2<<16); /* Twr not defined for DDR docs say 2 */
		if((index&0x0ff0000)<=0x0180000) {
		} else if((index&0x0ff0000)<=0x0300000) {
		/* Trfc Auto refresh cycle time */
		if((index2&0x0ff0000)<=0x0480000) {
		} else if((index2&0x0ff0000)<=0x0780000) {
		/* Docs state to use 99 for all 167 Mhz */
	else if(value <= 0x75) { /* 133 Mhz */
		drt |= ((index&3)<<2); /* set CAS latency */
		if((index&0x0ff00)<=0x03c00) {
			drt |= (1<<8); /* Trp RAS Precharg */
			drt |= (2<<8); /* Trp RAS Precharg */
		/* Trcd RAS to CAS delay */
		if((index2&0x0ff)<=0x03c) {
		/* Tdal Write auto precharge recovery delay */
		drt |= (2<<14); /* spd 41, but only one choice */
		drt |= (1<<16); /* Twr not defined for DDR docs say 1 */
		if((index&0x0ff0000)<=0x01e0000) {
		} else if((index&0x0ff0000)<=0x03c0000) {
		/* Trfc Auto refresh cycle time */
		if((index2&0x0ff0000)<=0x04b0000) {
		} else if((index2&0x0ff0000)<=0x0780000) {
		/* Based on CAS latency */
	die("Invalid SPD 9 bus speed.\r\n");

	pci_write_config32(ctrl->f0, DRT, drt);
/*
 * spd_set_dram_controller_mode() - compute the DRC register value.
 *
 * Requires every populated DIMM to be ECC (SPD byte 11 == 2, dies
 * otherwise), finds the most demanding refresh rate, rejects cycle
 * times slower than 7.5ns, then sets ECC mode, overlap/DED-retry bits,
 * ODT, independent clocks and the FSB-derived frequency field.
 *
 * NOTE(review): elided excerpt - locals (drc, rate, value, msr), the
 * loop braces and the final DDR2-type/return lines are only partly
 * visible here.
 */
static int spd_set_dram_controller_mode(const struct mem_controller *ctrl,
	/* SPD refresh-rate code -> internal rate, and the inverse map
	 * from internal rate -> DRC refresh field index */
	static const u8 spd_rates[6] = {15,3,7,7,62,62};
	static const u8 drc_rates[5] = {0,15,7,62,3};
	/* MSR 0xcd FSB field -> DRC bits 3:2 */
	static const u8 fsb_conversion[8] = {0,1,3,2,3,0,3,3};

	drc = pci_read_config32(ctrl->f0, DRC);
	for(cnt=0; cnt < 4; cnt++) {
		if (!(dimm_mask & (1 << cnt))) {
		value = spd_read_byte(ctrl->channel0[cnt], 11); /* ECC */
		if (value != 2) die("ERROR - Non ECC memory dimm\r\n");
		value = spd_read_byte(ctrl->channel0[cnt], 12); /*refresh rate*/
		value &= 0x0f; /* clip self refresh bit */
		if (value > 5) goto hw_err;
		if (rate > spd_rates[value])
			rate = spd_rates[value];
		value = spd_read_byte(ctrl->channel0[cnt], 9); /* cycle time */
		if (value > 0x75) goto hw_err; /* slower than 7.5ns: unsupported */
	drc |= (1 << 20); /* enable ECC */
	/* translate 'rate' back into the DRC refresh field index */
	for (cnt = 1; cnt < 5; cnt++)
		if (drc_rates[cnt] == rate)
	drc &= ~(7 << 8); /* clear the rate bits */
	drc |= (1 << 26); /* set the overlap bit - the factory BIOS does */
	drc |= (1 << 27); /* set DED retry enable - the factory BIOS does */
	drc &= ~(1 << 5); /* enable ODT */
	drc |= (1 << 4); /* independent clocks */
	/* set front side bus speed */
	msr = rdmsr(0xcd); /* returns 0 on Pentium M 90nm */
	value = msr.lo & 0x07;
	drc |= (fsb_conversion[value] << 2);
	/* set dram type to ddr2 */
	die("Bad SPD value\r\n");
	/* If an hw_error occurs report that I have no memory */
/*
 * sdram_set_spd_registers() - early SPD sanity check.
 *
 * Bails out loudly when no DIMM answers on channel 0.
 * NOTE(review): the remainder of the body is elided in this view.
 */
static void sdram_set_spd_registers(const struct mem_controller *ctrl)
	/* Test if we can read the spd and if ram is ddr or ddr2 */
	dimm_mask = spd_detect_dimms(ctrl);
	if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
		print_err("No memory for this cpu\r\n");
/* Short spin delay used between memory-init steps (body elided here). */
static void do_delay(void)

/* Poll limit for memory-init completion loops */
#define TIMEOUT_LOOPS 300000

/* DDR2 calibration engine registers, offsets within the MCBAR window */
#define DCALCSR 0x100	/* calibration control/status; bit 31 = busy */
#define DCALADDR 0x104	/* calibration address/command operand */
#define DCALDATA 0x108	/* calibration data buffer */
/*
 * set_on_dimm_termination_enable() - program DDR2 on-die termination.
 *
 * Classifies each DIMM slot as Empty/Single/Double sided from the DRB
 * boundary values, selects the matching pattern for DDR2ODTC (pattern
 * comments read left-to-right from the last slot: E=empty, S=single,
 * D=double), then issues a mode-register command to each even rank so
 * the DIMMs latch the new ODT settings.
 *
 * NOTE(review): elided excerpt - the t4 accumulation, the single- vs
 * double-sided branch structure and several braces are not visible.
 */
static void set_on_dimm_termination_enable(const struct mem_controller *ctrl)
	/* Set up northbridge values */
	pci_write_config32(ctrl->f0, SDRC, 0x30000000);
	/* Figure out which slots are Empty, Single, or Double sided */
	for(i=0,t4=0,c2=0;i<8;i+=2) {
		c1 = pci_read_config8(ctrl->f0, DRB+i);
		if(c1 == c2) continue; /* boundary unchanged: slot empty */
		c2 = pci_read_config8(ctrl->f0, DRB+1+i);
	/* first slot single-sided */
	if( ((t4>>8)&0x0f) == 0 ) {
		data32 = 0x00000010; /* EEES */
	if ( ((t4>>16)&0x0f) == 0 ) {
		data32 = 0x00003132; /* EESS */
	if ( ((t4>>24)&0x0f) == 0 ) {
		data32 = 0x00335566; /* ESSS */
	data32 = 0x77bbddee; /* SSSS */
	/* first slot double-sided */
	if( ((t4>>8)&0x0f) == 0 ) {
		data32 = 0x00003132; /* EEED */
	if ( ((t4>>8)&0x0f) == 2 ) {
		data32 = 0xb373ecdc; /* EEDD */
	if ( ((t4>>16)&0x0f) == 0 ) {
		data32 = 0x00b3a898; /* EESD */
	data32 = 0x777becdc; /* ESSD */
	die("Error - First dimm slot empty\r\n");
	print_debug("ODT Value = ");
	print_debug_hex32(data32);
	pci_write_config32(ctrl->f0, DDR2ODTC, data32);
	/* latch the ODT values: mode-register command to each even rank
	 * (presumably an EMRS - verify against the DCAL command encoding) */
	for(dimm=0;dimm<8;dimm+=2) {
		write32(MCBAR+DCALADDR, 0x0b840001);
		write32(MCBAR+DCALCSR, 0x81000003 | (dimm << 20));
		/* wait for the calibration engine to go idle (bit 31) */
		for(i=0;i<1001;i++) {
			data32 = read32(MCBAR+DCALCSR);
			if(!(data32 & (1<<31)))
708 static void set_receive_enable(const struct mem_controller *ctrl)
728 for(dimm=0;dimm<8;dimm+=1) {
731 write32(MCBAR+DCALDATA+(17*4), 0x04020000);
732 write32(MCBAR+DCALCSR, 0x81800004 | (dimm << 20));
734 for(i=0;i<1001;i++) {
735 data32 = read32(MCBAR+DCALCSR);
736 if(!(data32 & (1<<31)))
742 dcal_data32_0 = read32(MCBAR+DCALDATA + 0);
743 dcal_data32_1 = read32(MCBAR+DCALDATA + 4);
744 dcal_data32_2 = read32(MCBAR+DCALDATA + 8);
745 dcal_data32_3 = read32(MCBAR+DCALDATA + 12);
748 dcal_data32_0 = read32(MCBAR+DCALDATA + 16);
749 dcal_data32_1 = read32(MCBAR+DCALDATA + 20);
750 dcal_data32_2 = read32(MCBAR+DCALDATA + 24);
751 dcal_data32_3 = read32(MCBAR+DCALDATA + 28);
754 /* check if bank is installed */
755 if((dcal_data32_0 == 0) && (dcal_data32_2 == 0))
757 /* Calculate the timing value */
760 for(i=0,edge=0,bit=63,cnt=31,data32r=0,
761 work32l=dcal_data32_1,work32h=dcal_data32_3;
764 if(work32l & (1<<cnt))
767 work32l = dcal_data32_0;
768 work32h = dcal_data32_2;
774 if(!(work32l & (1<<cnt)))
777 work32l = dcal_data32_0;
778 work32h = dcal_data32_2;
786 data32 = ((bit%8) << 1);
787 if(work32h & (1<<cnt))
812 work32l = dcal_data32_0;
813 work32h = dcal_data32_2;
819 if(!(work32l & (1<<cnt)))
822 if(work32l & (1<<cnt))
825 data32 = (((cnt-1)%8)<<1);
826 if(work32h & (1<<(cnt-1))) {
829 /* test for frame edge cross overs */
830 if((edge == 1) && (data32 > 12) &&
831 (((recen+16)-data32) < 3)) {
835 if((edge == 2) && (data32 < 4) &&
836 ((recen - data32) > 12)) {
840 if(((recen+3) >= data32) && ((recen-3) <= data32))
850 recen+=2; /* this is not in the spec, but matches
851 the factory output, and has less failure */
852 recen <<= (dimm/2) * 8;
861 /* Check for Eratta problem */
862 for(i=cnt=0;i<32;i+=8) {
863 if (((recena>>i)&0x0f)>7) {
867 if((recena>>i)&0x0f) {
873 cnt = (cnt&0x0f) - (cnt>>16);
876 if(((recena>>i)&0x0f)>7) {
877 recena &= ~(0x0f<<i);
884 if(((recena>>i)&0x0f)<8) {
885 recena &= ~(0x0f<<i);
891 for(i=cnt=0;i<32;i+=8) {
892 if (((recenb>>i)&0x0f)>7) {
896 if((recenb>>i)&0x0f) {
902 cnt = (cnt&0x0f) - (cnt>>16);
905 if(((recenb>>i)&0x0f)>7) {
906 recenb &= ~(0x0f<<i);
913 if(((recenb>>8)&0x0f)<8) {
914 recenb &= ~(0x0f<<i);
921 print_debug("Receive enable A = ");
922 print_debug_hex32(recena);
923 print_debug(", Receive enable B = ");
924 print_debug_hex32(recenb);
927 /* clear out the calibration area */
928 write32(MCBAR+DCALDATA+(16*4), 0x00000000);
929 write32(MCBAR+DCALDATA+(17*4), 0x00000000);
930 write32(MCBAR+DCALDATA+(18*4), 0x00000000);
931 write32(MCBAR+DCALDATA+(19*4), 0x00000000);
934 write32(MCBAR+DCALCSR, 0x0000000f);
936 write32(MCBAR+0x150, recena);
937 write32(MCBAR+0x154, recenb);
/*
 * sdram_enable() - bring the DDR2 memory subsystem on line.
 *
 * Visible sequence: program DRM mapping, gearing and DRC (ODT off for
 * now); enable clocks and take the DDR subsystem out of idle; program
 * DRB/DRA/DRT from SPD; run the JEDEC-style per-rank init (NOP,
 * precharge-all, EMRS DLL enable, MRS DLL reset, precharge-all,
 * refreshes, MRS normal, EMRS); then ODT and receive-enable
 * calibration, DQS programming, refresh enable, memory clear for ECC,
 * init-complete, scrubbing, and finally cache-enable low memory.
 *
 * NOTE(review): heavily elided excerpt - locals, loop braces and some
 * steps are missing; comments below follow only the visible code.
 */
static void sdram_enable(int controllers, const struct mem_controller *ctrl)
	/* clock-gearing values, one row per FSB speed setting (indexed by
	 * drc bits 3:2); the struct declaration body is elided here */
	static const struct {
	{{ 0x00000010, 0x00000000, 0x00000002, 0x00000001}},
	{{ 0x00000120, 0x00000000, 0x00000032, 0x00000010}},
	{{ 0x00154320, 0x00000000, 0x00065432, 0x00010000}},
	{{ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff}},
	/* DQS timing values written to MCBAR+0x200.. below */
	static const u32 dqs_data[] = {
		0xffffffff, 0xffffffff, 0x000000ff,
		0xffffffff, 0xffffffff, 0x000000ff,
		0xffffffff, 0xffffffff, 0x000000ff,
		0xffffffff, 0xffffffff, 0x000000ff,
		0xffffffff, 0xffffffff, 0x000000ff,
		0xffffffff, 0xffffffff, 0x000000ff,
		0xffffffff, 0xffffffff, 0x000000ff,
		0xffffffff, 0xffffffff, 0x000000ff};

	mask = spd_detect_dimms(ctrl);
	print_debug("Starting SDRAM Enable\r\n");

	/* DIMM logical mapping: board may override via DIMM_MAP_LOGICAL */
#ifdef DIMM_MAP_LOGICAL
	pci_write_config32(ctrl->f0, DRM,
		0x00410000 | DIMM_MAP_LOGICAL);
	pci_write_config32(ctrl->f0, DRM, 0x00411248);

	/* set dram type and Front Side Bus freq. */
	drc = spd_set_dram_controller_mode(ctrl, mask);
	die("Error calculating DRC\r\n");
	data32 = drc & ~(3 << 20); /* clear ECC mode */
	data32 = data32 & ~(7 << 8); /* clear refresh rates */
	data32 = data32 | (1 << 5); /* temp turn off ODT */
	/* Set gearing, then dram controller mode */
	/* drc bits 3:2 = FSB speed */
	for(iptr = gearing[(drc>>2)&3].clkgr,cnt=0;cnt<4;cnt++) {
		pci_write_config32(ctrl->f0, 0xa0+(cnt*4), iptr[cnt]);
	pci_write_config32(ctrl->f0, DRC, data32);

	/* turn the clocks on */
	pci_write_config16(ctrl->f0, CKDIS, 0x0000);

	/* 0x9a DDRCSR Take subsystem out of idle */
	data16 = pci_read_config16(ctrl->f0, DDRCSR);
	data16 &= ~(7 << 12);
	data16 |= (1 << 12);
	pci_write_config16(ctrl->f0, DDRCSR, data16);

	/* program row size DRB */
	spd_set_ram_size(ctrl, mask);
	/* program page size DRA */
	spd_set_row_attributes(ctrl, mask);
	/* program DRT timing values */
	cas_latency = spd_set_drt_attributes(ctrl, mask, drc);

	for(i=0;i<8;i+=2) { /* loop through each dimm to test */
		print_debug("DIMM ");
		print_debug_hex8(i);
		print_debug("\r\n");
		write32(MCBAR+DCALCSR, (0x01000000 | (i<<20)));
		write32(MCBAR+DCALCSR, (0x81000000 | (i<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000) /* poll the busy bit */
			data32 = read32(MCBAR+DCALCSR);

	/* NOP command to each rank */
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR + DCALCSR, (0x81000000 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	/* Precharg all banks */
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALADDR, 0x04000000);
		write32(MCBAR+DCALCSR, (0x81000002 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	/* EMRS dll's enabled */
	for(cs=0;cs<8;cs+=2) {
		/* fixme hard code AL additive latency */
		write32(MCBAR+DCALADDR, 0x0b940001);
		write32(MCBAR+DCALCSR, (0x81000003 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	/* MRS reset dll's - mode register value depends on CAS latency */
	if(cas_latency == 30)
		mode_reg = 0x053a0000;
		mode_reg = 0x054a0000;
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALADDR, mode_reg);
		write32(MCBAR+DCALCSR, (0x81000003 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	/* Precharg all banks */
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALADDR, 0x04000000);
		write32(MCBAR+DCALCSR, (0x81000002 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	/* Do 2 refreshes */
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x81000001 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x81000001 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	/* for good luck do 6 more */
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x81000001 | (cs<<20)));
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x81000001 | (cs<<20)));
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x81000001 | (cs<<20)));
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x81000001 | (cs<<20)));
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x81000001 | (cs<<20)));
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x81000001 | (cs<<20)));

	/* MRS reset dll's normal (clear the DLL-reset bit, 1<<24) */
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALADDR, (mode_reg & ~(1<<24)));
		write32(MCBAR+DCALCSR, (0x81000003 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	/* Do only if DDR2 EMRS dll's enabled */
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALADDR, (0x0b940001));
		write32(MCBAR+DCALCSR, (0x81000003 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	write32(MCBAR+DCALCSR, 0x0000000f);

	/* enable on dimm termination */
	set_on_dimm_termination_enable(ctrl);
	/* receive enable calibration */
	set_receive_enable(ctrl);

	/* DQS programming: open the window via 0x94, write the table */
	pci_write_config32(ctrl->f0, 0x94, 0x3904aa00);
	for(i = 0, cnt = (MCBAR+0x200); i < 24; i++, cnt+=4) {
		write32(cnt, dqs_data[i]);
	pci_write_config32(ctrl->f0, 0x94, 0x3900aa00);

	/* Enable refresh */
	data32 = drc & ~(3 << 20); /* clear ECC mode */
	pci_write_config32(ctrl->f0, DRC, data32);
	write32(MCBAR+DCALCSR, 0x0008000f);

	/* clear memory and init ECC */
	print_debug("Clearing memory\r\n");
	for(i=0;i<64;i+=4) {
		write32(MCBAR+DCALDATA+i, 0x00000000);
	for(cs=0;cs<8;cs+=2) {
		write32(MCBAR+DCALCSR, (0x810831d8 | (cs<<20)));
		data32 = read32(MCBAR+DCALCSR);
		while(data32 & 0x80000000)
			data32 = read32(MCBAR+DCALCSR);

	/* Bring memory subsystem on line */
	data32 = pci_read_config32(ctrl->f0, 0x98);
	data32 |= (1 << 31);
	pci_write_config32(ctrl->f0, 0x98, data32);
	/* wait for completion */
	print_debug("Waiting for mem complete\r\n");
	data32 = pci_read_config32(ctrl->f0, 0x98);
	if( (data32 & (1<<31)) == 0)
	print_debug("Done\r\n");

	/* Set initialization complete */
	data32 = drc & ~(3 << 20); /* clear ECC mode */
	pci_write_config32(ctrl->f0, DRC, data32);
	/* Set the ecc mode */
	pci_write_config32(ctrl->f0, DRC, drc);

	/* Enable memory scrubbing */
	data16 = pci_read_config16(ctrl->f0, MCHSCRB);
	data16 |= ((2 << 2) | (2 << 0));
	pci_write_config16(ctrl->f0, MCHSCRB, data16);

	/* The memory is now setup, use it */
	cache_lbmem(MTRR_TYPE_WRBACK);