1 #include <console/console.h>
4 #include <device/device.h>
5 #include <device/pci.h>
6 #include <device/pci_ids.h>
11 #include "northbridge.h"
12 #include <cpu/amd/gx2def.h>
13 #include <cpu/x86/msr.h>
14 #include <cpu/x86/cache.h>
15 #include <cpu/amd/vr.h>
17 #include "../../../southbridge/amd/cs5536/cs5536.h"
20 #define NORTHBRIDGE_FILE "northbridge.c"
/* NOTE(review): fragment of the sizeram() helper. The function signature,
 * local declarations (msr, dimm, sizem) and several statements are not
 * visible in this chunk, so the surrounding control flow (per-DIMM
 * present/absent checks, return statement) cannot be confirmed here. */
22 /* todo: add a resource record. We don't do this here because this may be called when
23  * very little of the platform is actually working.
31 /* Get the RAM size from the memory controller as calculated and set by auto_size_dimm() */
32 msr = rdmsr(MC_CF07_DATA);
33 printk(BIOS_DEBUG, "sizeram: _MSR MC_CF07_DATA: %08x:%08x\n", msr.hi, msr.lo);
/* Presumably bits [15:12] of the DIMM field encode a size exponent, with the
 * result scaled to megabytes by the *8 — TODO confirm against the GX2 data book. */
39 sizem = (1 << ((dimm >> 12)-1)) * 8;
/* Second DIMM (if populated) adds its size on top of the first. */
45 sizem += (1 << ((dimm >> 12)-1)) * 8;
47 printk(BIOS_DEBUG, "sizeram: sizem 0x%x\n", sizem);
51 /* here is programming for the various MSRs. */
52 #define IM_QWAIT 0x100000
54 #define DMCF_WRITE_SERIALIZE_REQUEST (2<<12) /* 2 outstanding */ /* in high */
55 #define DMCF_SERIAL_LOAD_MISSES (2) /* enabled */
/* RCONF attribute bits: each memory-property field in an RCONF MSR is an
 * 8-bit combination of the flags below. */
57 /* these are the 8-bit attributes for controlling RCONF registers */
58 #define CACHE_DISABLE (1<<0)
59 #define WRITE_ALLOCATE (1<<1)
60 #define WRITE_PROTECT (1<<2)
61 #define WRITE_THROUGH (1<<3)
62 #define WRITE_COMBINE (1<<4)
63 #define WRITE_SERIALIZE (1<<5)
/* Canned property combinations used when building CPU_RCONF_DEFAULT. */
65 /* ram has none of this stuff */
66 #define RAM_PROPERTIES (0)
67 #define DEVICE_PROPERTIES (WRITE_SERIALIZE|CACHE_DISABLE)
68 #define ROM_PROPERTIES (WRITE_SERIALIZE|WRITE_PROTECT|CACHE_DISABLE)
69 #define MSR_WS_CD_DEFAULT (0x21212121)
71 /* 1810-1817 give you 8 registers with which to program protection regions */
72 /* these are the region configuration range registers, or RRCF */
73 /* in msr terms, they are a straight base, top address assign, since they are 4k aligned. */
74 /* so no left-shift needed for top or base */
75 #define RRCF_LOW(base,properties) (base|(1<<8)|properties)
76 #define RRCF_LOW_CD(base) RRCF_LOW(base, CACHE_DISABLE)
/* Initializer builders for GLIU P2D (physical-to-device) and IOD (I/O)
 * descriptor MSRs.  Each expands to a {msr_number, {hi, lo}} pair; BM =
 * base/mask, BMO = base/mask with offset, R = range, RO = range with
 * offset, SC = swiss-cheese, IOD_* = the I/O-space equivalents.
 * NOTE(review): arguments are deliberately pre-shifted by callers (see the
 * table entries below); field layout should be checked against the GX2
 * GLIU descriptor documentation before modifying. */
78 /* build initializer for P2D MSR */
79 #define P2D_BM(msr, pdid1, bizarro, pbase, pmask) {msr, {.hi=(pdid1<<29)|(bizarro<<28)|(pbase>>24), .lo=(pbase<<8)|pmask}}
80 #define P2D_BMO(msr, pdid1, bizarro, poffset, pbase, pmask) {msr, {.hi=(pdid1<<29)|(bizarro<<28)|(poffset<<8)|(pbase>>24), .lo=(pbase<<8)|pmask}}
81 #define P2D_R(msr, pdid1, bizarro, pmax, pmin) {msr, {.hi=(pdid1<<29)|(bizarro<<28)|(pmax>>12), .lo=(pmax<<20)|pmin}}
82 #define P2D_RO(msr, pdid1, bizarro, poffset, pmax, pmin) {msr, {.hi=(pdid1<<29)|(bizarro<<28)|(poffset<<8)|(pmax>>12), .lo=(pmax<<20)|pmin}}
83 #define P2D_SC(msr, pdid1, bizarro, wen, ren,pscbase) {msr, {.hi=(pdid1<<29)|(bizarro<<28)|(wen), .lo=(ren<<16)|(pscbase>>18)}}
84 #define IOD_BM(msr, pdid1, bizarro, ibase, imask) {msr, {.hi=(pdid1<<29)|(bizarro<<28)|(ibase>>12), .lo=(ibase<<20)|imask}}
85 #define IOD_SC(msr, pdid1, bizarro, en, wen, ren, ibase) {msr, {.hi=(pdid1<<29)|(bizarro<<28), .lo=(en<<24)|(wen<<21)|(ren<<20)|(ibase<<3)}}
/* NOTE(review): interior of the msr_defaults[] initializer table; the array
 * declaration and closing brace are outside the visible chunk.  The table is
 * only consumed by the #if 0'd loop at the end of setup_gx2(). */
92 {0x1700, {.hi = 0, .lo = IM_QWAIT}},
93 {0x1800, {.hi = DMCF_WRITE_SERIALIZE_REQUEST, .lo = DMCF_SERIAL_LOAD_MISSES}},
94 /* 1808 will be done down below, so we have to do 180a->1817 (well, 1813 really) */
95 /* for 180a, for now, we assume VSM will configure it */
96 /* 180b is left at reset value,a0000-bffff is non-cacheable */
97 /* 180c, c0000-dffff is set to write serialize and non-cacheable */
98 /* oops, 180c will be set by cpu bug handling in cpubug.c */
99 //{0x180c, {.hi = MSR_WS_CD_DEFAULT, .lo = MSR_WS_CD_DEFAULT}},
100 /* 180d is left at default, e0000-fffff is non-cached */
102 /* we will assume 180e, the ssm region configuration, is left at default or set by VSM */
103 /* we will not set 0x180f, the DMM,yet */
104 //{0x1810, {.hi=0xee7ff000, .lo=RRCF_LOW(0xee000000, WRITE_COMBINE|CACHE_DISABLE)}},
105 //{0x1811, {.hi = 0xefffb000, .lo = RRCF_LOW_CD(0xefff8000)}},
106 //{0x1812, {.hi = 0xefff7000, .lo = RRCF_LOW_CD(0xefff4000)}},
107 //{0x1813, {.hi = 0xefff3000, .lo = RRCF_LOW_CD(0xefff0000)}},
/* GLIU P2D descriptors: map 0-512 MB (0x0/0xfff80 and 0x80000/0xfffe0 base/
 * mask pairs) and a swiss-cheese region at 0xC0000 on both GLIU0 (0x1000xxxx)
 * and GLIU1 (0x4000xxxx). */
108 /* now for GLPCI routing */
110 P2D_BM(0x10000020, 0x1, 0x0, 0x0, 0xfff80),
111 P2D_BM(0x10000021, 0x1, 0x0, 0x80000, 0xfffe0),
112 P2D_SC(0x1000002c, 0x1, 0x0, 0x0, 0xff03, 0xC0000),
114 P2D_BM(0x40000020, 0x1, 0x0, 0x0, 0xfff80),
115 P2D_BM(0x40000021, 0x1, 0x0, 0x80000, 0xfffe0),
116 P2D_SC(0x4000002d, 0x1, 0x0, 0x0, 0xff03, 0xC0000),
/* Program PCI INTA-INTD steering by writing irq_map into config register
 * 0x5C of the southbridge (device CHIPSET_DEV_NUM, function 0) using
 * config mechanism #1 (ports 0xCF8/0xCFC).
 * NOTE(review): the opening/closing braces of this function are not visible
 * in this chunk. */
120 /* note that dev is NOT used -- yet */
121 static void irq_init_steering(struct device *dev, u16 irq_map)
123 /* Set up IRQ steering */
/* 0x80000000 = config-access enable; bits 15:11 = device number; 0x5C = register. */
124 u32 pciAddr = 0x80000000 | (CHIPSET_DEV_NUM << 11) | 0x5C;
126 printk(BIOS_DEBUG, "%s(%p [%08X], %04X)\n", __func__, dev, pciAddr, irq_map);
128 /* The IRQ steering values (in hex) are effectively dcba, where:
129  * <a> represents the IRQ for INTA,
130  * <b> represents the IRQ for INTB,
131  * <c> represents the IRQ for INTC, and
132  * <d> represents the IRQ for INTD.
133  * Thus, a value of irq_map = 0xAA5B translates to:
134  * INTA = IRQ 11 (hex digit B)
135  * INTB = IRQ 5  (hex digit 5)
136  * INTC = IRQ 10 (hex digit A)
137  * INTD = IRQ 10 (hex digit A)
/* outl(value, port): select the dword-aligned config address, then write the map. */
139 outl(pciAddr & ~3, 0xCF8);
140 outl(irq_map, 0xCFC);
145  * Returns the amount of memory (in KB) available to the system. This is the
146  * total amount of memory less the amount of memory reserved for SMM use.
/* Builds the 64-bit CPU_RCONF_DEFAULT value: ROM properties/base in the high
 * bits, device properties, SYSTOP (top of usable RAM) and RAM properties in
 * the low bits, then writes it via wrmsr.
 * NOTE(review): several lines are missing from this chunk -- the msr_t
 * declaration, the shift/OR of sizereg into val, the msr.lo assignment and
 * the return statement -- so the full computation cannot be verified here. */
148 static int setup_gx2_cache(void)
151 unsigned long long val;
153 int sizekbytes, sizereg;
154 sizekbytes = sizeram() * 1024;
155 printk(BIOS_DEBUG, "setup_gx2_cache: enable for %d KB\n", sizekbytes);
156 /* build up the rconf word. */
157 /* the SYSTOP bits 27:8 are actually the top bits from 31:12. Book fails to say that */
/* ROMRC (ROM properties) live in bits 63:56. */
159 val = ((unsigned long long) ROM_PROPERTIES) << 56;
160 /* make rom base useful for 1M roms */
161 /* Flash base address -- sized for 1M for now */
162 val |= ((unsigned long long) 0xfff00)<<36;
163 /* set the devrp properties */
164 val |= ((unsigned long long) DEVICE_PROPERTIES) << 28;
165 /* Take our TOM, RIGHT shift 12, since it page-aligned, then LEFT-shift 8 for reg. */
166 /* yank off memory for the SMM handler */
167 sizekbytes -= SMM_SIZE;
168 sizereg = sizekbytes;
169 sizereg *= 1024; /* convert KB to bytes */
173 val |= RAM_PROPERTIES;
175 msr.hi = (val >> 32);
176 printk(BIOS_DEBUG, "msr 0x%08X will be set to %08x:%08x\n", CPU_RCONF_DEFAULT, msr.hi, msr.lo);
177 wrmsr(CPU_RCONF_DEFAULT, msr);
/* Program the GLIU memory descriptors (0x10000028 / 0x40000029) to cover
 * system RAM, then set up the SMM descriptor MSR 0x10000026.
 * NOTE(review): many lines of this function are missing from this chunk
 * (msr_t declaration, SYSTOP math between lines, the msr.hi/.lo assignments
 * that should sit between the two wrmsr(0x10000026) calls, the 0x1808
 * handling, and closing braces). */
184 /* we have to do this here. We have not found a nicer way to do it */
185 static void setup_gx2(void)
187 unsigned long tmp, tmp2;
189 unsigned long size_kb, membytes;
191 size_kb = setup_gx2_cache();
193 membytes = size_kb * 1024;
194 /* NOTE! setup_gx2_cache returns the SIZE OF RAM - RAMADJUST!
195  * so it is safe to use. You should NOT at this point call
196  * sizeram() directly.
199 /* we need to set 0x10000028 and 0x40000029 */
201 /* These two descriptors cover the range from 1 MB (0x100000) to
202  * SYSTOP (a.k.a. TOM, or Top of Memory)
206 /* This has already been done elsewhere */
/* NOTE(review): format-string bug -- size_kb and membytes are unsigned long
 * but are printed with %x; should be %lx (UB per C printf rules). */
207 printk(BIOS_DEBUG, "size_kb 0x%x, membytes 0x%x\n", size_kb, membytes);
/* hi: descriptor type 0x2 in bits 31:29 plus pbase high bits;
 * lo: pmask 0x100 plus low 12 bits of the page-aligned top address. */
208 msr.hi = 0x20000000 | membytes>>24;
209 msr.lo = 0x100 | ( ((membytes >>12) & 0xfff) << 20);
210 wrmsr(0x10000028, msr);
211 msr.hi = 0x20000000 | membytes>>24;
212 msr.lo = 0x100 | ( ((membytes >>12) & 0xfff) << 20);
213 wrmsr(0x40000029, msr);
/* Read back for debugging only. */
216 msr = rdmsr(0x10000028);
217 printk(BIOS_DEBUG, "MSR 0x%x is now 0x%x:0x%x\n", 0x10000028, msr.hi,msr.lo);
218 msr = rdmsr(0x40000029);
219 printk(BIOS_DEBUG, "MSR 0x%x is now 0x%x:0x%x\n", 0x40000029, msr.hi,msr.lo);
222 /* fixme: SMM MSR 0x10000026 and 0x40000023 (comment had a stray zero) */
223 /* calculate the OFFSET field */
224 tmp = membytes - SMM_OFFSET;
228 tmp |= (SMM_OFFSET >> 24);
230 /* calculate the PBASE and PMASK fields */
231 tmp2 = (SMM_OFFSET << 8) & 0xFFF00000; /* shift right 12 then left 20 == left 8 */
/* Two's-complement of the SMM size in pages gives the 20-bit PMASK. */
232 tmp2 |= (((~(SMM_SIZE * 1024) + 1) >> 12) & 0xfffff);
233 printk(BIOS_DEBUG, "MSR 0x%x is now 0x%lx:0x%lx\n", 0x10000026, tmp, tmp2);
/* NOTE(review): wrmsr(0x10000026) appears twice; the original lines between
 * them (which load msr.hi/.lo from tmp/tmp2) are not visible in this chunk,
 * so the first write below uses whatever msr held previously. */
236 wrmsr(0x10000026, msr);
242 wrmsr(0x10000026, msr);
243 msr = rdmsr(0x10000026);
244 printk(BIOS_DEBUG, "MSR 0x%x is now 0x%x:0x%x\n", 0x10000026, msr.hi, msr.lo);
251 printk(BIOS_DEBUG, "MSR 0x%x is now 0x%x:0x%x\n", 0x1808, msr.hi, msr.lo);
/* Disabled bulk programming of msr_defaults[]; kept for reference. */
253 #if 0 /* SDG - don't do this */
254 /* now do the default MSR values */
255 for(i = 0; msr_defaults[i].msr_no; i++) {
257 wrmsr(msr_defaults[i].msr_no, msr_defaults[i].msr); /* MSR - see table above */
258 msr = rdmsr(msr_defaults[i].msr_no);
259 printk(BIOS_DEBUG, "MSR 0x%08X is now 0x%08X:0x%08X\n", msr_defaults[i].msr_no, msr.hi,msr.lo);
/* NOTE(review): only the signature of enable_shadow() is visible here; the
 * body (if any) is outside this chunk. */
264 static void enable_shadow(device_t dev)
/* Device init hook: pulls the board's irqmap out of the devicetree config
 * and programs IRQ steering.  NOTE(review): braces and trailing body lines
 * are not visible in this chunk. */
269 static void northbridge_init(device_t dev)
273 struct northbridge_amd_gx2_config *nb = (struct northbridge_amd_gx2_config *)dev->chip_info;
274 printk(BIOS_DEBUG, "northbridge: %s()\n", __func__);
277 irq_init_steering(dev, nb->irqmap);
279 /* HACK HACK HACK HACK */
280 /* 0x1000 is where GPIO is being assigned */
287 /* due to vsa interactions, we need not touch the nb settings ... */
288 /* this is a test -- we are not sure it will work -- but it ought to */
/* Custom set_resources: program each PCI resource, recurse into child buses,
 * then apply default latency timers, clear the interrupt line, and set the
 * cache line size.  NOTE(review): braces, the 'bus' declaration and the use
 * of 'line' (original lines between these) are not visible in this chunk. */
289 static void set_resources(struct device *dev)
292 struct resource *res;
294 for(res = &dev->resource_list; res; res = res->next) {
/* NOTE(review): BUG -- the loop variable is 'res' but the call passes
 * 'resource', which is not declared anywhere visible; this should almost
 * certainly be pci_set_resource(dev, res). */
295 pci_set_resource(dev, resource);
300 for(bus = dev->link_list; bus; bus = bus->next) {
302 assign_resources(bus);
307 /* set a default latency timer */
308 pci_write_config8(dev, PCI_LATENCY_TIMER, 0x40);
310 /* set a default secondary latency timer */
311 if ((dev->hdr_type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
312 pci_write_config8(dev, PCI_SEC_LATENCY_TIMER, 0x40);
315 /* zero the irq settings */
/* 'line' is read here; the conditional that consumes it is not visible. */
316 u8 line = pci_read_config8(dev, PCI_INTERRUPT_PIN);
318 pci_write_config8(dev, PCI_INTERRUPT_LINE, 0);
320 /* set the cache line size, so far 64 bytes is good for everyone */
321 pci_write_config8(dev, PCI_CACHE_LINE_SIZE, 64 >> 2);
/* Device operations for the GX2 northbridge PCI device.
 * NOTE(review): .set_resources is initialized twice; in C the later
 * designated initializer (set_resources) wins, but this draws a
 * -Woverride-init warning -- the first line was probably meant to be
 * commented out. Closing brace is outside this chunk. */
325 static struct device_operations northbridge_operations = {
326 .read_resources = pci_dev_read_resources,
328 .set_resources = pci_dev_set_resources,
330 .set_resources = set_resources,
331 .enable_resources = pci_dev_enable_resources,
332 .init = northbridge_init,
/* Bind northbridge_operations to the NS GX2 host bridge PCI ID.
 * NOTE(review): closing brace/semicolon is outside this chunk. */
338 static const struct pci_driver northbridge_driver __pci_driver = {
339 .ops = &northbridge_operations,
340 .vendor = PCI_VENDOR_ID_NS,
341 .device = PCI_DEVICE_ID_NS_GX2,
343 /* FIXME handle UMA correctly. */
/* 4 MB carved out of top of RAM for the integrated graphics framebuffer. */
344 #define FRAMEBUFFERK 4096
/* Domain-level resource assignment: size RAM from the memory controller's
 * bank config, reserve the framebuffer at top of memory, program
 * BC_DRAM_TOP / MC_GBASE_ADD, and report RAM resources.
 * NOTE(review): heavily fragmented here -- declarations of pci_tolm, mc_dev,
 * i and idx, the assignment of tomk, and several braces are not visible, so
 * tomk appears used before any visible initialization. */
346 static void pci_domain_set_resources(device_t dev)
352 pci_tolm = find_pci_tolm(dev->link_list);
353 mc_dev = dev->link_list->children;
355 unsigned int tomk, tolmk;
356 unsigned int ramreg = 0;
/* Memory-mapped GX registers for top-of-DRAM and graphics base. */
358 unsigned int *bcdramtop = (unsigned int *)(GX_BASE + BC_DRAM_TOP);
359 unsigned int *mcgbaseadd = (unsigned int *)(GX_BASE + MC_GBASE_ADD);
/* Two banks, whose fields sit 16 bits apart in MC_BANK_CFG. */
361 for(i=0; i<0x20; i+= 0x10) {
362 unsigned int *mcreg = (unsigned int *)(GX_BASE + MC_BANK_CFG);
363 unsigned int mem_config = *mcreg;
/* Page-size field == 7 presumably means "bank not populated" -- TODO confirm. */
365 if (((mem_config & (DIMM_PG_SZ << i)) >> (4 + i)) == 7)
367 ramreg += 1 << (((mem_config & (DIMM_SZ << i)) >> (i + 8)) + 2);
372 /* Sort out the framebuffer size */
373 tomk -= FRAMEBUFFERK;
/* DRAM top is a byte address (inclusive); GBASE is in 512 KB units. */
374 *bcdramtop = ((tomk << 10) - 1);
375 *mcgbaseadd = (tomk >> 9);
377 printk(BIOS_DEBUG, "BC_DRAM_TOP = 0x%08x\n", *bcdramtop);
378 printk(BIOS_DEBUG, "MC_GBASE_ADD = 0x%08x\n", *mcgbaseadd);
380 printk(BIOS_DEBUG, "I would set ram size to %d Mbytes\n", (tomk >> 10));
382 /* Compute the top of Low memory */
383 tolmk = pci_tolm >> 10;
385 /* The PCI hole does not overlap the memory.
389 /* Report the memory regions */
391 ram_resource(dev, idx++, 0, tolmk);
394 assign_resources(dev->link_list);
/* Operations for the PCI domain pseudo-device; resources handled by the
 * custom pci_domain_set_resources above.  NOTE(review): closing brace is
 * outside this chunk. */
397 static struct device_operations pci_domain_ops = {
398 .read_resources = pci_domain_read_resources,
399 .set_resources = pci_domain_set_resources,
400 .enable_resources = NULL,
402 .scan_bus = pci_domain_scan_bus,
/* CPU-cluster init: bring up the CPUs on this link.  NOTE(review): braces
 * for both functions are not visible in this chunk; cpu_bus_noop is an
 * intentionally empty stub used for the no-op resource hooks below. */
405 static void cpu_bus_init(device_t dev)
407 initialize_cpus(dev->link_list);
410 static void cpu_bus_noop(device_t dev)
/* Operations for the APIC cluster: resource hooks are no-ops, only init
 * does real work.  NOTE(review): closing brace is outside this chunk. */
414 static struct device_operations cpu_bus_ops = {
415 .read_resources = cpu_bus_noop,
416 .set_resources = cpu_bus_noop,
417 .enable_resources = cpu_bus_noop,
418 .init = cpu_bus_init,
422 void chipsetInit (void);
/* NOTE(review): the body of this #if block (presumably the
 * high_tables_base/high_tables_size definitions) is not visible here. */
424 #if CONFIG_WRITE_HIGH_TABLES==1
/* chip enable_dev hook: attach the right device_operations to the PCI
 * domain and APIC cluster pseudo-devices; for the domain, also size RAM
 * and reserve space for high tables.
 * NOTE(review): fragmented -- the declaration of tomk, the calls between
 * the cpubug comment and dev->ops assignment (cpubug()/setup_gx2()/
 * do_vsmbios()?), and the matching #endif lines are not visible. */
428 static void enable_dev(struct device *dev)
430 printk(BIOS_DEBUG, "gx2 north: enable_dev\n");
431 void do_vsmbios(void);
433 /* Set the operations if it is a special bus type */
434 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
436 printk(BIOS_DEBUG, "DEVICE_PATH_PCI_DOMAIN\n");
437 /* cpubug MUST be called before setup_gx2(), so we force the issue here */
444 dev->ops = &pci_domain_ops;
/* Usable RAM in KB = (total MB minus video memory) * 1024, minus SMM. */
446 tomk = ((sizeram() - VIDEO_MB) * 1024) - SMM_SIZE;
447 #if CONFIG_WRITE_HIGH_TABLES==1
448 /* Leave some space for ACPI, PIRQ and MP tables */
449 high_tables_base = (tomk * 1024) - HIGH_MEMORY_SIZE;
450 high_tables_size = HIGH_MEMORY_SIZE;
452 ram_resource(dev, 0, 0, tomk);
453 } else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
454 printk(BIOS_DEBUG, "DEVICE_PATH_APIC_CLUSTER\n");
455 dev->ops = &cpu_bus_ops;
457 printk(BIOS_DEBUG, "gx2 north: end enable_dev\n");
/* Public chip_operations entry point picked up by the devicetree.
 * NOTE(review): closing brace is outside this chunk. */
460 struct chip_operations northbridge_amd_gx2_ops = {
461 CHIP_NAME("AMD GX (previously GX2) Northbridge")
462 .enable_dev = enable_dev,