2 * This file is part of the coreboot project.
4 * Copyright (C) 2011 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include <console/console.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <device/pci_ids.h>
26 #include <device/hypertransport.h>
31 #include <cpu/x86/lapic.h>
34 #if CONFIG_LOGICAL_CPUS==1
35 #include <pc80/mc146818rtc.h>
38 #include <cpu/amd/amdfam10_sysconf.h>
42 #include "root_complex/chip.h"
43 #include "northbridge.h"
/* AGESA wrapper entry point called at "mid" init time; defined in the
 * platform's agesawrapper code. */
47 extern uint32_t agesawrapper_amdinitmid(void);
49 typedef struct amdfam10_sysconf_t sys_info_conf_t;
/* DRAM base/mask pair as read back from the F1 address-map registers. */
50 typedef struct dram_base_mask {
51 	u32 base; //[47:27] at [28:8]
52 	u32 mask; //[47:27] at [28:8] and enable at bit 0
/* System-wide configuration shared with the rest of the port. */
56 struct amdfam10_sysconf_t sysconf;
/* Cached PCI device handles for functions 0/1/2/4 of each node (0:0x18.x),
 * populated by get_fx_devs(). */
57 static device_t __f0_dev[NODE_NUMS];
58 static device_t __f1_dev[NODE_NUMS];
59 static device_t __f2_dev[NODE_NUMS];
60 static device_t __f4_dev[NODE_NUMS];
/* Number of nodes whose f0/f1 devices were found; set by get_fx_devs(). */
61 static unsigned fx_devs = 0;
/* Extended config space support was removed; fail the build if enabled. */
63 #if (defined CONFIG_EXT_CONF_SUPPORT) && CONFIG_EXT_CONF_SUPPORT == 1
64 #error CONFIG_EXT_CONF_SUPPORT == 1 not support anymore!
/* Read the DRAM base/mask pair for node @nodeid from the F1 address map.
 * Extended-config path uses the 0x110/0x114 indirect index registers;
 * the classic path assembles the value from the 0x40/0x44 base/limit
 * registers plus the 0x140/0x144 high-address extensions. */
67 static dram_base_mask_t get_dram_base_mask(u32 nodeid)
73 #if CONFIG_EXT_CONF_SUPPORT == 1
74 /* Use the extended register space only, for simplicity. */
75 pci_write_config32(dev, 0x110, nodeid | (1<<28)); // [47:27] at [28:8]
76 d.mask = pci_read_config32(dev, 0x114); // enable is bit 0
77 pci_write_config32(dev, 0x110, nodeid | (0<<28));
78 d.base = pci_read_config32(dev, 0x114) & 0x1fffff00; //[47:27] at [28:8];
81 temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
82 d.mask = ((temp & 0xfff80000)>>(8+3)); // mask out DramMask [26:24] too
83 temp = pci_read_config32(dev, 0x144 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
86 temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
87 d.mask |= (temp & 1); // enable bit
89 d.base = ((temp & 0xfff80000)>>(8+3)); // mask out DramBase [26:24) too
90 temp = pci_read_config32(dev, 0x140 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
96 #if CONFIG_EXT_CONF_SUPPORT
/* Program the config-map destination byte (@cfg_map_dest) for every bus
 * number in [busn_min, busn_max] into node @nodeid's F1 indirect
 * address-map registers (index 0x110, data 0x114). Each 32-bit data
 * register holds four one-byte entries, so the range is split into
 * partial first/last registers plus fully covered middle registers. */
97 static void set_addr_map_reg_4_6_in_one_node(u32 nodeid, u32 cfg_map_dest,
98 u32 busn_min, u32 busn_max,
104 u32 index_min, index_max;
105 u32 dest_min, dest_max;
/* Convert bus numbers into (register index, byte-within-register). */
106 index_min = busn_min>>2; dest_min = busn_min - (index_min<<2);
107 index_max = busn_max>>2; dest_max = busn_max - (index_max<<2);
109 // Three cases: index_min==index_max; index_min+1==index_max; index_min+1<index_max
110 dev = __f1_dev[nodeid];
111 if (index_min== index_max) {
/* Whole range fits in one data register: patch bytes dest_min..dest_max. */
112 pci_write_config32(dev, 0x110, index_min | (type<<28));
113 tempreg = pci_read_config32(dev, 0x114);
114 for (i=dest_min; i<=dest_max; i++) {
115 tempreg &= ~(0xff<<(i*8));
116 tempreg |= (cfg_map_dest<<(i*8));
118 pci_write_config32(dev, 0x110, index_min | (type<<28)); // do i need to write it again
119 pci_write_config32(dev, 0x114, tempreg);
120 } else if (index_min<index_max) {
/* Partial first register: bytes dest_min..3. */
121 pci_write_config32(dev, 0x110, index_min | (type<<28));
122 tempreg = pci_read_config32(dev, 0x114);
123 for (i=dest_min; i<=3; i++) {
124 tempreg &= ~(0xff<<(i*8));
125 tempreg |= (cfg_map_dest<<(i*8));
127 pci_write_config32(dev, 0x110, index_min | (type<<28)); // do i need to write it again
128 pci_write_config32(dev, 0x114, tempreg);
/* Partial last register: bytes 0..dest_max. */
130 pci_write_config32(dev, 0x110, index_max | (type<<28));
131 tempreg = pci_read_config32(dev, 0x114);
132 for (i=0; i<=dest_max; i++) {
133 tempreg &= ~(0xff<<(i*8));
134 tempreg |= (cfg_map_dest<<(i*8));
136 pci_write_config32(dev, 0x110, index_max | (type<<28)); // do i need to write it again
137 pci_write_config32(dev, 0x114, tempreg);
/* Fully covered middle registers get all four bytes set. */
138 if ((index_max-index_min)>1) {
140 for (i=0; i<=3; i++) {
141 tempreg &= ~(0xff<<(i*8));
142 tempreg |= (cfg_map_dest<<(i*8));
144 for (i=index_min+1; i<index_max;i++) {
145 pci_write_config32(dev, 0x110, i | (type<<28));
146 pci_write_config32(dev, 0x114, tempreg);
153 #if CONFIG_PCI_BUS_SEGN_BITS
/* Validate/advance the PCI segment+bus number for a node; every node
 * must share the same segment number. Bumps to the next segment when
 * the bus number approaches the top of the current one, and programs
 * the segment bits into F1 register 0x160. */
154 static u32 check_segn(device_t dev, u32 segbusn, u32 nodes,
155 sys_info_conf_t *sysinfo)
157 //check segbusn here; we need every node to have the same segn
158 if ((segbusn & 0xff)>(0xe0-1)) {// use next segn
159 u32 segn = (segbusn >> 8) & 0x0f;
165 val = pci_read_config32(dev, 0x160);
/* Segment number [11:8] of segbusn maps to bits [28:25] of reg 0x160. */
167 val |= (segbusn & 0xf00)<<(25-8);
168 pci_write_config32(dev, 0x160, val);
/* Find a free extended-config IO address-map slot (beyond the 4 legacy
 * pairs, hence the +4 offset) and claim it for @nodeid/@linkn.
 * Returns the slot index; used to build extended IO resource indices. */
175 static u32 get_io_addr_index(u32 nodeid, u32 linkn)
179 for (index=0; index<256; index++) {
180 if ((sysconf.conf_io_addrx[index+4] == 0)) {
181 sysconf.conf_io_addr[index+4] = (nodeid & 0x3f) ;
182 sysconf.conf_io_addrx[index+4] = 1 | ((linkn & 0x7)<<4);
/* Find a free extended-config MMIO address-map slot (beyond the 8 legacy
 * pairs, hence the +8 offset) and claim it for @nodeid/@linkn.
 * Returns the slot index; used to build extended MMIO resource indices. */
191 static u32 get_mmio_addr_index(u32 nodeid, u32 linkn)
195 for (index=0; index<64; index++) {
196 if ((sysconf.conf_mmio_addrx[index+8] == 0)) {
197 sysconf.conf_mmio_addr[index+8] = (nodeid & 0x3f) ;
198 sysconf.conf_mmio_addrx[index+8] = 1 | ((linkn & 0x7)<<4);
/* Record an allocated IO range (base/limit in 256-byte units) into the
 * sysconf tables so later stages (e.g. ACPI/table generation) can
 * reproduce the routing. @index selects the extended slot; the legacy
 * slot is derived from @reg. */
206 static void store_conf_io_addr(u32 nodeid, u32 linkn, u32 reg, u32 index,
207 u32 io_min, u32 io_max)
210 #if CONFIG_EXT_CONF_SUPPORT
213 /* io range allocation */
214 index = (reg-0xc0)>>3;
215 #if CONFIG_EXT_CONF_SUPPORT
221 val = (nodeid & 0x3f); // 6 bits used
222 sysconf.conf_io_addr[index] = val | ((io_max<<8) & 0xfffff000); //limit : with nodeid
223 val = 3 | ((linkn & 0x7)<<4) ; // 8 bits used
224 sysconf.conf_io_addrx[index] = val | ((io_min<<8) & 0xfffff000); // base : with enable bit
/* Track the highest slot in use. */
226 if (sysconf.io_addr_num<(index+1))
227 sysconf.io_addr_num = index+1;
/* Record an allocated MMIO range (base/limit in 256-byte units) into the
 * sysconf tables, mirroring store_conf_io_addr() for memory space. */
230 static void store_conf_mmio_addr(u32 nodeid, u32 linkn, u32 reg, u32 index,
231 u32 mmio_min, u32 mmio_max)
234 #if CONFIG_EXT_CONF_SUPPORT
237 /* mmio range allocation */
238 index = (reg-0x80)>>3;
239 #if CONFIG_EXT_CONF_SUPPORT
245 val = (nodeid & 0x3f) ; // 6 bits used
246 sysconf.conf_mmio_addr[index] = val | (mmio_max & 0xffffff00); //limit : with nodeid and linkn
247 val = 3 | ((linkn & 0x7)<<4) ; // 8 bits used
248 sysconf.conf_mmio_addrx[index] = val | (mmio_min & 0xffffff00); // base : with enable bit
/* Track the highest slot in use. */
250 if (sysconf.mmio_addr_num<(index+1))
251 sysconf.mmio_addr_num = index+1;
/* Program an IO base/limit register pair (@reg, @reg+4) on every node's
 * F1 so the range [io_min, io_max] (in 256-byte units) routes to
 * @nodeid/@linkn. Propagates VGA and no-ISA forwarding from the link's
 * bridge control. The extended-config path instead programs the bus/IO
 * config maps via set_addr_map_reg_4_6_in_one_node(). */
254 static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
255 u32 io_min, u32 io_max)
260 #if CONFIG_EXT_CONF_SUPPORT
263 /* io range allocation */
264 tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit
265 for (i=0; i<sysconf.nodes; i++)
266 pci_write_config32(__f1_dev[i], reg+4, tempreg);
268 tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ?
270 // FIXME: can we use VGA reg instead?
271 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
/* NOTE(review): 'link' is printed with %s here — verify the format
 * specifier matches its type (looks like an integer index; %d expected). */
272 printk(BIOS_SPEW, "%s, enabling legacy VGA IO forwarding for %s link %s\n",
273 __func__, dev_path(dev), link);
274 tempreg |= PCI_IO_BASE_VGA_EN;
276 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_NO_ISA) {
277 tempreg |= PCI_IO_BASE_NO_ISA;
280 for (i=0; i<sysconf.nodes; i++)
281 pci_write_config32(__f1_dev[i], reg, tempreg);
282 #if CONFIG_EXT_CONF_SUPPORT
288 // if ht_c_index > 3, we should use extended space
289 if (io_min>io_max) return;
/* Home node routes to the destination link... */
290 // for nodeid at first
291 cfg_map_dest = (1<<7) | (1<<6) | (linkn<<0);
293 set_addr_map_reg_4_6_in_one_node(nodeid, cfg_map_dest, io_min, io_max, 4);
/* ...all other nodes route toward the home node. */
296 cfg_map_dest = (1<<7) | (0<<6) | (nodeid<<0);
297 for (j = 0; j< sysconf.nodes; j++) {
298 if (j== nodeid) continue;
299 set_addr_map_reg_4_6_in_one_node(j,cfg_map_dest, io_min, io_max, 4);
/* Program an MMIO base/limit register pair (@reg, @reg+4) on all nodes
 * so the range [mmio_min, mmio_max] (in 256-byte units) routes to
 * @nodeid/@linkn. The extended-config path uses the indirect 0x110/0x114
 * registers with type 2 (base) / type 3 (limit) indices instead. */
304 static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
309 #if CONFIG_EXT_CONF_SUPPORT
312 /* mmio range allocation */
313 tempreg = (nodeid&0xf) | (linkn<<4) | (mmio_max&0xffffff00); //limit
314 for (i=0; i<nodes; i++)
315 pci_write_config32(__f1_dev[i], reg+4, tempreg);
316 tempreg = 3 | (nodeid & 0x30) | (mmio_min&0xffffff00);
317 for (i=0; i<sysconf.nodes; i++)
318 pci_write_config32(__f1_dev[i], reg, tempreg);
319 #if CONFIG_EXT_CONF_SUPPORT
325 // if ht_c_index > 3, we should use extended space
326 // for nodeid at first
329 if (mmio_min>mmio_max) {
/* Program the home node: base entry routes to the destination link. */
334 dev = __f1_dev[nodeid];
335 tempreg = ((mmio_min>>3) & 0x1fffff00)| (1<<6) | (linkn<<0);
336 pci_write_config32(dev, 0x110, index | (2<<28));
337 pci_write_config32(dev, 0x114, tempreg);
339 tempreg = ((mmio_max>>3) & 0x1fffff00) | enable;
340 pci_write_config32(dev, 0x110, index | (3<<28));
341 pci_write_config32(dev, 0x114, tempreg);
/* Remaining nodes route toward the home node.
 * NOTE(review): 'dev' appears unchanged inside this loop in the visible
 * lines — confirm it is reassigned to __f1_dev[j] per iteration. */
344 tempreg = ((mmio_min>>3) & 0x1fffff00) | (0<<6) | (nodeid<<0);
345 for (j = 0; j< sysconf.nodes; j++) {
346 if (j== nodeid) continue;
348 pci_write_config32(dev, 0x110, index | (2<<28));
349 pci_write_config32(dev, 0x114, tempreg);
352 tempreg = ((mmio_max>>3) & 0x1fffff00) | enable;
353 for (j = 0; j< sysconf.nodes; j++) {
354 if(j==nodeid) continue;
356 pci_write_config32(dev, 0x110, index | (3<<28));
357 pci_write_config32(dev, 0x114, tempreg);
/* Return the PCI device for node @nodeid, function @fn (device
 * CONFIG_CDB + nodeid on bus CONFIG_CBB). Nodes >= 32 overflow onto
 * bus CONFIG_CBB-1 with the device number wrapped. */
362 static device_t get_node_pci(u32 nodeid, u32 fn)
366 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
368 return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
372 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
/* Return bit 54 of the NB_CFG MSR (InitApicIdCpuIdLo), which controls
 * the APIC ID bit ordering used when enumerating cores. */
376 static unsigned int read_nb_cfg_54(void)
379 msr = rdmsr(NB_CFG_MSR);
380 return (( msr.hi >> (54-32)) & 1);
/* Populate the __f0/__f1/__f2/__f4_dev caches for every node and count
 * how many nodes have both f0 and f1 present (fx_devs). Dies if node 0
 * is missing, since nothing else can work without it. */
383 static void get_fx_devs(void)
386 for (i = 0; i < NODE_NUMS; i++) {
387 __f0_dev[i] = get_node_pci(i, 0);
388 __f1_dev[i] = get_node_pci(i, 1);
389 __f2_dev[i] = get_node_pci(i, 2);
390 __f4_dev[i] = get_node_pci(i, 4);
391 if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
394 if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
395 die("Cannot find 0:0x18.[0|1]\n");
/* Read a function-1 config register from node 0 (all nodes are kept in
 * sync for the address-map registers, so node 0 is representative). */
399 static u32 f1_read_config32(unsigned reg)
403 return pci_read_config32(__f1_dev[0], reg);
/* Broadcast a function-1 config register write to every enabled node so
 * the address-map registers stay identical across the fabric. */
406 static void f1_write_config32(unsigned reg, u32 value)
411 for(i = 0; i < fx_devs; i++) {
414 if (dev && dev->enabled) {
415 pci_write_config32(dev, reg, value);
/* Derive the node id of @dev from its PCI device number: device number
 * minus CONFIG_CDB, with +32 for nodes living on the secondary
 * (CONFIG_CBB-1) bus. */
420 static u32 amdfam10_nodeid(device_t dev)
424 busn = dev->bus->secondary;
425 if (busn != CONFIG_CBB) {
426 return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
428 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
432 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
/* Enable legacy VGA routing (F1 register 0xf4) toward @nodeid/@linkn on
 * all nodes, so VGA MMIO/IO ranges reach the node owning the VGA card. */
436 static void set_vga_enable_reg(u32 nodeid, u32 linkn)
440 val = 1 | (nodeid<<4) | (linkn<<12);
441 /* it will route (1) mmio 0xa0000:0xbffff (2) io 0x3b0:0x3bb,
443 f1_write_config32(0xf4, val);
/* Check whether address-map register pair @reg is usable for
 * @goal_dev/@goal_nodeid/@goal_link: scans all nodes/links for an
 * existing resource with that register's IO index. The return value
 * distinguishes "free", "already ours", and "taken" (see callers in
 * amdfam10_find_iopair/mempair). */
447 static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
450 struct resource *res;
451 unsigned nodeid, link = 0;
454 for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
456 dev = __f0_dev[nodeid];
459 for (link = 0; !res && (link < 8); link++) {
460 res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
/* Loop counters were post-incremented before exit, hence the -1. */
466 if ( (goal_link == (link - 1)) &&
467 (goal_nodeid == (nodeid - 1)) &&
/* Find (or allocate) an IO base/limit register pair for
 * @dev/@nodeid/@link and return a new resource for it. Legacy pairs
 * live at 0xc0..0xd8; when exhausted, an extended-config slot is
 * encoded into the resource index instead. */
475 static struct resource *amdfam10_find_iopair(device_t dev, unsigned nodeid, unsigned link)
477 struct resource *resource;
482 for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
484 result = reg_useable(reg, dev, nodeid, link);
486 /* I have been allocated this one */
489 else if (result > 1) {
490 /* I have a free register pair */
495 reg = free_reg; // if no free, the free_reg still be 0
500 //because of extended conf space, we will never run out of regs, but we need one index to distinguish them, so the same node and link can have multiple ranges
501 u32 index = get_io_addr_index(nodeid, link);
502 reg = 0x110+ (index<<24) + (4<<20); // index could be 0..255
505 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/* Find (or allocate) an MMIO base/limit register pair for
 * @dev/@nodeid/@link and return a new resource for it. Legacy pairs
 * live at 0x80..0xb8; when exhausted, an extended-config slot is
 * encoded into the resource index instead. */
510 static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
512 struct resource *resource;
517 for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
519 result = reg_useable(reg, dev, nodeid, link);
521 /* I have been allocated this one */
524 else if (result > 1) {
525 /* I have a free register pair */
535 //because of extended conf space, we will never run out of regs,
536 // but we need one index to distinguish them, so the same node and
537 // same link can have multiple ranges
538 u32 index = get_mmio_addr_index(nodeid, link);
539 reg = 0x110+ (index<<24) + (6<<20); // index could be 0..63
542 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/* Create the IO, prefetchable-MMIO, and MMIO bridge resources for one
 * HT link of @dev, with the alignment/granularity/limit constraints the
 * allocator needs. Extended-config resources get normalized separately. */
546 static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
548 struct resource *resource;
550 /* Initialize the io space constraints on the current bus */
551 resource = amdfam10_find_iopair(dev, nodeid, link);
554 #if CONFIG_EXT_CONF_SUPPORT == 1
555 if((resource->index & 0x1fff) == 0x1110) { // ext
560 align = log2(HT_IO_HOST_ALIGN);
563 resource->align = align;
564 resource->gran = align;
565 resource->limit = 0xffffUL;
566 resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
569 /* Initialize the prefetchable memory constraints on the current bus */
570 resource = amdfam10_find_mempair(dev, nodeid, link);
574 resource->align = log2(HT_MEM_HOST_ALIGN);
575 resource->gran = log2(HT_MEM_HOST_ALIGN);
576 resource->limit = 0xffffffffffULL;
577 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
578 resource->flags |= IORESOURCE_BRIDGE;
580 #if CONFIG_EXT_CONF_SUPPORT == 1
581 if ((resource->index & 0x1fff) == 0x1110) { // ext
582 normalize_resource(resource);
588 /* Initialize the memory constraints on the current bus */
589 resource = amdfam10_find_mempair(dev, nodeid, link);
593 resource->align = log2(HT_MEM_HOST_ALIGN);
594 resource->gran = log2(HT_MEM_HOST_ALIGN);
595 resource->limit = 0xffffffffffULL;
596 resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
597 #if CONFIG_EXT_CONF_SUPPORT == 1
598 if ((resource->index & 0x1fff) == 0x1110) { // ext
599 normalize_resource(resource);
/* read_resources hook: create bridge resources for every link of this
 * node device that actually has children attached. */
605 static void amdfam10_read_resources(device_t dev)
609 nodeid = amdfam10_nodeid(dev);
610 for (link = dev->link_list; link; link = link->next) {
611 if (link->children) {
612 amdfam10_link_read_bases(dev, nodeid, link->link_num);
/* Program one allocated resource into the hardware address-map
 * registers (IO or MMIO) and record it in the sysconf tables.
 * Skips resources that are unassigned, already stored, not MEM/IO,
 * or not function-1 routing resources (index < 0x1000). */
617 static void amdfam10_set_resource(device_t dev, struct resource *resource,
620 resource_t rbase, rend;
621 unsigned reg, link_num;
624 /* Make certain the resource has actually been set */
625 if (!(resource->flags & IORESOURCE_ASSIGNED)) {
629 /* If I have already stored this resource don't worry about it */
630 if (resource->flags & IORESOURCE_STORED) {
634 /* Only handle PCI memory and IO resources */
635 if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
638 /* Ensure I am actually looking at a resource of function 1 */
639 if ((resource->index & 0xffff) < 0x1000) {
642 /* Get the base address */
643 rbase = resource->base;
645 /* Get the limit (rounded up) */
646 rend = resource_end(resource);
648 /* Get the register and link */
649 reg = resource->index & 0xfff; // 4k
650 link_num = IOINDEX_LINK(resource->index);
/* Hardware registers take addresses in 256-byte units, hence >>8. */
652 if (resource->flags & IORESOURCE_IO) {
654 set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
655 store_conf_io_addr(nodeid, link_num, reg, (resource->index >> 24), rbase>>8, rend>>8);
657 else if (resource->flags & IORESOURCE_MEM) {
658 set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
659 store_conf_mmio_addr(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8);
661 resource->flags |= IORESOURCE_STORED;
662 sprintf(buf, " <node %x link %x>",
664 report_resource_stored(dev, resource, buf);
668 * Reusing the resource allocation code in amdfam10_set_resource() was
669 * attempted, but it is too difficult to deal with the resource allocation magic.
/* Find which of @dev's links leads to the (first) VGA card and enable
 * legacy VGA routing toward it via set_vga_enable_reg(). With multiple
 * adapters, the primary one (vga_pri) is matched by bus range. */
672 static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
676 /* find out which link the VGA card is connected to;
677 * we only deal with the 'first' vga card */
678 for (link = dev->link_list; link; link = link->next) {
679 if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
680 #if CONFIG_MULTIPLE_VGA_ADAPTERS == 1
681 extern device_t vga_pri; // the primary vga device, defined in device.c
682 printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
683 link->secondary,link->subordinate);
684 /* We need to make sure the vga_pri is under the link */
685 if((vga_pri->bus->secondary >= link->secondary ) &&
686 (vga_pri->bus->secondary <= link->subordinate )
693 /* no VGA card installed */
697 printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link->link_num);
698 set_vga_enable_reg(nodeid, link->link_num);
/* set_resources hook: route VGA, program every resource of this node
 * into the hardware, then recurse into child buses. */
701 static void amdfam10_set_resources(device_t dev)
705 struct resource *res;
707 /* Find the nodeid */
708 nodeid = amdfam10_nodeid(dev);
710 amdfam10_create_vga_resource(dev, nodeid);
712 /* Set each resource we have found */
713 for (res = dev->resource_list; res; res = res->next) {
714 amdfam10_set_resource(dev, res, nodeid);
717 for (bus = dev->link_list; bus; bus = bus->next) {
719 assign_resources(bus);
/* init hook for the node function-0 device; no work needed here. */
724 static void mcf0_control_init(struct device *dev)
/* scan_bus hook: find the link carrying the southbridge IO hub
 * (sysconf.sblk) and scan the PCI bus behind it. Dies if the devicetree
 * has no enabled IO hub on that link. */
728 static unsigned amdfam10_scan_chains(device_t dev, unsigned max)
732 unsigned sblink = sysconf.sblk;
733 device_t io_hub = NULL;
734 u32 next_unitid = 0xff;
736 nodeid = amdfam10_nodeid(dev);
738 for (link = dev->link_list; link; link = link->next) {
739 if (link->link_num == sblink) { /* devicetree puts the IO Hub on link_list[3] */
740 io_hub = link->children;
741 if (!io_hub || !io_hub->enabled) {
742 die("I can't find the IO Hub, or IO Hub not enabled, please check the device tree.\n");
744 /* Now that nothing is overlapping it is safe to scan the children. */
745 max = pci_scan_bus(link, 0x00, ((next_unitid - 1) << 3) | 7, 0);
/* Device operations for the node function-0 (HT config) PCI device. */
753 static struct device_operations northbridge_operations = {
754 .read_resources = amdfam10_read_resources,
755 .set_resources = amdfam10_set_resources,
756 .enable_resources = pci_dev_enable_resources,
757 .init = mcf0_control_init,
758 .scan_bus = amdfam10_scan_chains,
/* Bind northbridge_operations to the AMD node function-0 PCI device. */
763 static const struct pci_driver mcf0_driver __pci_driver = {
764 .ops = &northbridge_operations,
765 .vendor = PCI_VENDOR_ID_AMD,
/* Chip operations entry referenced from the devicetree. */
769 struct chip_operations northbridge_amd_agesa_family10_ops = {
770 CHIP_NAME("AMD FAM10 Northbridge")
/* Domain read_resources hook: reserve any address-map register pairs
 * already programmed (e.g. by romstage), then create the system-wide
 * IO/MMIO constraint resources for each domain link. */
775 static void amdfam10_domain_read_resources(device_t dev)
779 /* Find the already assigned resource pairs */
781 for (reg = 0x80; reg <= 0xd8; reg+= 0x08) {
783 base = f1_read_config32(reg);
784 limit = f1_read_config32(reg + 0x04);
785 /* Is this register allocated? */
786 if ((base & 3) != 0) {
787 unsigned nodeid, reg_link;
/* Destination node id is encoded differently for MMIO vs IO pairs. */
789 if (reg<0xc0) { // mmio
790 nodeid = (limit & 0xf) + (base&0x30);
792 nodeid = (limit & 0xf) + ((base>>4)&0x30);
794 reg_link = (limit >> 4) & 7;
795 reg_dev = __f0_dev[nodeid];
797 /* Reserve the resource */
798 struct resource *res;
799 res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
806 /* FIXME: do we need to check extended conf space?
807 I don't believe that much preset value */
809 #if CONFIG_PCI_64BIT_PREF_MEM == 0
810 pci_domain_read_resources(dev);
813 struct resource *resource;
814 for (link=dev->link_list; link; link = link->next) {
815 /* Initialize the system wide io space constraints */
816 resource = new_resource(dev, 0|(link->link_num<<2));
817 resource->base = 0x400;
818 resource->limit = 0xffffUL;
819 resource->flags = IORESOURCE_IO;
821 /* Initialize the system wide prefetchable memory resources constraints */
822 resource = new_resource(dev, 1|(link->link_num<<2));
823 resource->limit = 0xfcffffffffULL;
824 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
826 /* Initialize the system wide memory resources constraints */
827 resource = new_resource(dev, 2|(link->link_num<<2));
828 resource->limit = 0xfcffffffffULL;
829 resource->flags = IORESOURCE_MEM;
/* Domain enable_resources hook: run the AGESA "mid" init stage, which
 * must happen after PCI enumeration and resource allocation. */
834 static void amdfam10_domain_enable_resources(device_t dev)
837 /* Must be called after PCI enumeration and resource allocation */
838 printk(BIOS_DEBUG, "\nFam10 - domain_enable_resources: AmdInitMid.\n");
839 val = agesawrapper_amdinitmid();
841 printk(BIOS_DEBUG, "agesawrapper_amdinitmid failed: %x \n", val);
843 printk(BIOS_DEBUG, " ader - leaving domain_enable_resources.\n");
/* Find the top of low (PCI) memory: lower @tolm to the base of the
 * lowest MMIO resource found on @bus, if any lies below it. */
847 static u32 my_find_pci_tolm(struct bus *bus, u32 tolm)
849 struct resource *min;
851 search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
852 if (min && tolm > min->base) {
858 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* Location of the hardware memory hole below 4G: start (in KiB) and
 * the node it lives on (-1 if none was found). */
859 struct hw_mem_hole_info {
860 unsigned hole_startk;
/* Locate the hardware memory hole: first check each node's DRAM Hole
 * Address Register (F1 0xf0); if none is set, fall back to detecting a
 * gap between consecutive nodes' DRAM base/limit ranges below 4G. */
864 static struct hw_mem_hole_info get_hw_mem_hole_info(void)
866 struct hw_mem_hole_info mem_hole;
869 mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
870 mem_hole.node_id = -1;
872 for (i = 0; i < sysconf.nodes; i++) {
875 d = get_dram_base_mask(i);
876 if (!(d.mask & 1)) continue; // no memory on this node
878 hole = pci_read_config32(__f1_dev[i], 0xf0);
879 if (hole & 1) { // we find the hole
880 mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
881 mem_hole.node_id = i; // record the node No with hole
882 break; // only one hole
886 // We need to double check if there is a special setting where base and limit regs are not contiguous (instead of a hole); if so, treat the gap start as hole_startk
887 if (mem_hole.node_id == -1) {
888 resource_t limitk_pri = 0;
889 for (i=0; i<sysconf.nodes; i++) {
891 resource_t base_k, limit_k;
892 d = get_dram_base_mask(i);
893 if (!(d.base & 1)) continue;
895 base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
896 if (base_k > 4 *1024 * 1024) break; // don't need to go to check
897 if (limitk_pri != base_k) { // we find the hole
898 mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
899 mem_hole.node_id = i;
900 break; //only one hole
903 limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
904 limitk_pri = limit_k;
911 #if CONFIG_GFXUMA == 1
/* UMA framebuffer region, set up elsewhere by the UMA code. */
912 extern uint64_t uma_memory_base, uma_memory_size;
/* Reserve the UMA graphics memory region as a fixed, already-assigned
 * resource so the allocator leaves it alone. */
914 static void add_uma_resource(struct device *dev, int index)
916 struct resource *resource;
918 printk(BIOS_DEBUG, "Adding UMA memory area\n");
919 resource = new_resource(dev, index);
920 resource->base = (resource_t) uma_memory_base;
921 resource->size = (resource_t) uma_memory_size;
922 resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
923 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
/* Domain set_resources hook: place high PCI prefetch/memory windows
 * (64-bit pref-mem config), compute the MMIO base below 4G (pci_tolm,
 * rounded to 64M, possibly lowered to the hardware memory hole), then
 * report each node's DRAM as ram_resources — splitting around the
 * legacy VGA hole and the MMIO hole — place the high-tables area and
 * UMA region, and finally recurse into child links. */
927 static void amdfam10_domain_set_resources(device_t dev)
929 #if CONFIG_PCI_64BIT_PREF_MEM == 1
930 struct resource *io, *mem1, *mem2;
931 struct resource *res;
933 unsigned long mmio_basek;
937 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
938 struct hw_mem_hole_info mem_hole;
939 u32 reset_memhole = 1;
942 #if CONFIG_PCI_64BIT_PREF_MEM == 1
944 for (link = dev->link_list; link; link = link->next) {
945 /* Now reallocate the pci resources memory with the
946 * highest addresses I can manage.
948 mem1 = find_resource(dev, 1|(link->link_num<<2));
949 mem2 = find_resource(dev, 2|(link->link_num<<2));
951 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
952 mem1->base, mem1->limit, mem1->size, mem1->align);
953 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
954 mem2->base, mem2->limit, mem2->size, mem2->align);
956 /* See if both resources have roughly the same limits */
957 if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
958 ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
960 /* If so place the one with the most stringent alignment first
962 if (mem2->align > mem1->align) {
963 struct resource *tmp;
968 /* Now place the memory as high up as it will go */
969 mem2->base = resource_max(mem2);
970 mem1->limit = mem2->base - 1;
971 mem1->base = resource_max(mem1);
974 /* Place the resources as high up as they will go */
975 mem2->base = resource_max(mem2);
976 mem1->base = resource_max(mem1);
979 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
980 mem1->base, mem1->limit, mem1->size, mem1->align);
981 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
982 mem2->base, mem2->limit, mem2->size, mem2->align);
985 for (res = &dev->resource_list; res; res = res->next)
987 res->flags |= IORESOURCE_ASSIGNED;
988 res->flags |= IORESOURCE_STORED;
989 report_resource_stored(dev, res, "");
/* Determine the top of low memory from all MMIO below 4G. */
993 pci_tolm = 0xffffffffUL;
994 for (link = dev->link_list; link; link = link->next) {
995 pci_tolm = my_find_pci_tolm(link, pci_tolm);
998 // FIXME handle interleaved nodes. If you fix this here, please fix
1000 mmio_basek = pci_tolm >> 10;
1001 /* Round mmio_basek to something the processor can support */
1002 mmio_basek &= ~((1 << 6) -1);
1004 // FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
1005 // MMIO hole. If you fix this here, please fix amdk8, too.
1006 /* Round the mmio hole to 64M */
1007 mmio_basek &= ~((64*1024) - 1);
1009 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
1010 /* if the hw mem hole is already set in raminit stage, here we will compare
1011 * mmio_basek and hole_basek. if mmio_basek is bigger than hole_basek we will
1012 * use hole_basek as mmio_basek and we don't need to reset the hole.
1013 * otherwise we reset the hole to the mmio_basek
1016 mem_hole = get_hw_mem_hole_info();
1018 // Use hole_basek as mmio_basek, and we don't need to reset hole anymore
1019 if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
1020 mmio_basek = mem_hole.hole_startk;
/* Report each node's DRAM range as RAM resources. */
1027 for (i = 0; i < sysconf.nodes; i++) {
1029 resource_t basek, limitk, sizek; // 4 1T
1030 d = get_dram_base_mask(i);
1032 if (!(d.mask & 1)) continue;
1033 basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lose 6 bits here
1034 limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
1035 sizek = limitk - basek;
1037 /* see if we need a hole from 0xa0000 to 0xbffff (legacy VGA) */
1038 if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
1039 ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
1041 basek = (8*64)+(16*16);
1042 sizek = limitk - ((8*64)+(16*16));
1046 //printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);
1048 /* split the region to accommodate pci memory space */
1049 if ((basek < 4*1024*1024 ) && (limitk > mmio_basek)) {
1050 if (basek <= mmio_basek) {
1052 pre_sizek = mmio_basek - basek;
1054 ram_resource(dev, (idx | i), basek, pre_sizek);
1057 #if CONFIG_WRITE_HIGH_TABLES==1
1058 if (high_tables_base==0) {
1059 /* Leave some space for ACPI, PIRQ and MP tables */
1060 #if CONFIG_GFXUMA == 1
1061 high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
1063 high_tables_base = (mmio_basek * 1024) - HIGH_MEMORY_SIZE;
1065 high_tables_size = HIGH_MEMORY_SIZE;
1066 printk(BIOS_DEBUG, " split: %dK table at =%08llx\n",
1067 (u32)(high_tables_size / 1024), high_tables_base);
/* Remainder above the MMIO hole is remapped above 4G. */
1073 if ((basek + sizek) <= 4*1024*1024) {
1077 basek = 4*1024*1024;
1078 sizek -= (4*1024*1024 - mmio_basek);
1082 #if CONFIG_GFXUMA == 1
1083 /* Deduct uma memory before reporting because
1084 * this is what the mtrr code expects */
1085 sizek -= uma_memory_size / 1024;
1087 ram_resource(dev, (idx | i), basek, sizek);
1089 #if CONFIG_WRITE_HIGH_TABLES==1
1090 printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
1091 i, mmio_basek, basek, limitk);
1092 if (high_tables_base==0) {
1093 /* Leave some space for ACPI, PIRQ and MP tables */
1094 #if CONFIG_GFXUMA == 1
1095 high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
1097 high_tables_base = (limitk * 1024) - HIGH_MEMORY_SIZE;
1099 high_tables_size = HIGH_MEMORY_SIZE;
1104 #if CONFIG_GFXUMA == 1
1105 add_uma_resource(dev, 7);
1108 for(link = dev->link_list; link; link = link->next) {
1109 if (link->children) {
1110 assign_resources(link);
/* Domain scan_bus hook: clear the HT chain mapping registers (and the
 * extended config maps), scan all links, then tune HT transaction
 * control (response PassPW) on each node per the devicetree's
 * relaxed-ordering setting. */
1115 static u32 amdfam10_domain_scan_bus(device_t dev, u32 max)
1120 /* Unmap all of the HT chains */
1121 for (reg = 0xe0; reg <= 0xec; reg += 4) {
1122 f1_write_config32(reg, 0);
1124 #if CONFIG_EXT_CONF_SUPPORT == 1
/* Also clear every extended config-map entry (type 6) on every node. */
1126 for (i = 0; i< sysconf.nodes; i++) {
1128 for(index = 0; index < 64; index++) {
1129 pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
1130 pci_write_config32(__f1_dev[i], 0x114, 0);
1137 for (link = dev->link_list; link; link = link->next) {
1138 max = pci_scan_bus(link, PCI_DEVFN(CONFIG_CDB, 0), 0xff, max);
1141 /* Tune the hypertransport transaction for best performance.
1142 * Including enabling relaxed ordering if it is safe.
1145 for (i = 0; i < fx_devs; i++) {
1147 f0_dev = __f0_dev[i];
1148 if (f0_dev && f0_dev->enabled) {
1150 httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
1151 httc &= ~HTTC_RSP_PASS_PW;
1152 if (!dev->link_list->disable_relaxed_ordering) {
1153 httc |= HTTC_RSP_PASS_PW;
1155 printk(BIOS_SPEW, "%s passpw: %s\n",
1157 (!dev->link_list->disable_relaxed_ordering)?
1158 "enabled":"disabled");
1159 pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
/* Device operations for the PCI domain; config access method is
 * selected at build time (MMCONF vs CF8/CFC). */
1166 static struct device_operations pci_domain_ops = {
1167 .read_resources = amdfam10_domain_read_resources,
1168 .set_resources = amdfam10_domain_set_resources,
1169 .enable_resources = amdfam10_domain_enable_resources,
1171 .scan_bus = amdfam10_domain_scan_bus,
1172 #if CONFIG_MMCONF_SUPPORT_DEFAULT
1173 .ops_pci_bus = &pci_ops_mmconf,
1175 .ops_pci_bus = &pci_cf8_conf1,
/* Initialize the global sysconf from node 0's registers: southbridge
 * link (F0 0x64 [10:8]), node count (F0 0x60 NodeCnt), HT chain table,
 * and the BSP's APIC id. */
1180 static void sysconf_init(device_t dev) // first node
1182 sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
1184 sysconf.ht_c_num = 0;
1186 unsigned ht_c_index;
1188 for (ht_c_index=0; ht_c_index<32; ht_c_index++) {
1189 sysconf.ht_c_conf_bus[ht_c_index] = 0;
1192 sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1; //NodeCnt[2:0]
1194 /* Find the bootstrap processor's apicid */
1195 sysconf.bsp_apicid = lapicid();
/* Extend @dev's link list to @total_links entries, allocating and
 * zero-filling the additional struct bus objects and chaining them
 * onto the existing list (or creating the list from scratch). */
1198 static void add_more_links(device_t dev, unsigned total_links)
1200 struct bus *link, *last = NULL;
/* Walk to the end of the current list to find the highest link_num. */
1203 for (link = dev->link_list; link; link = link->next)
1207 int links = total_links - last->link_num;
1208 link_num = last->link_num;
1210 link = malloc(links*sizeof(*link));
1212 die("Couldn't allocate more links!\n");
1213 memset(link, 0, links*sizeof(*link));
/* No existing links: allocate the whole list. */
1219 link = malloc(total_links*sizeof(*link));
1220 memset(link, 0, total_links*sizeof(*link));
1221 dev->link_list = link;
/* Number and chain the newly allocated links. */
1224 for (link_num = link_num + 1; link_num < total_links; link_num++) {
1225 link->link_num = link_num;
1227 link->next = link + 1;
1234 /* dummy read_resources: local APICs have no resources to enumerate */
1235 static void lapic_read_resources(device_t dev)
/* Device operations for local APIC (CPU) devices on the cpu bus. */
1239 static struct device_operations lapic_ops = {
1240 .read_resources = lapic_read_resources,
1241 .set_resources = pci_dev_set_resources,
1242 .enable_resources = pci_dev_enable_resources,
/*
 * Scan the "cpu bus" (APIC cluster): locate the per-node northbridge
 * PCI config devices, relocate them onto bus CONFIG_CBB, count the
 * cores on each node, and create/enable one APIC device per core.
 * NOTE(review): this extract is missing intermediate lines (the leading
 * numbers are original-file line numbers); several braces/conditions
 * are not visible here.
 */
1249 static u32 cpu_bus_scan(device_t dev, u32 max)
1251 struct bus *cpu_bus;
1254 device_t pci_domain;
1261 int disable_siblings;
1262 unsigned ApicIdCoreIdSize;
/* CPUID 8000_0008h ECX[15:12] gives the width of the core-id field in
 * the APIC ID; (1 << width) - 1 is the max sibling index per node. */
1265 ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
1266 if (ApicIdCoreIdSize) {
1267 siblings = (1<<ApicIdCoreIdSize)-1;
/* Field not reported: fall back to the Family 10h maximum (4 cores). */
1269 siblings = 3; //quad core
/* Multi-core can be disabled at build time or via the "multi_core"
 * CMOS/NVRAM option. */
1272 disable_siblings = !CONFIG_LOGICAL_CPUS;
1273 #if CONFIG_LOGICAL_CPUS == 1
1274 get_option(&disable_siblings, "multi_core");
1277 // How can I get the nb_cfg_54 of every node's nb_cfg_54 in bsp???
1278 nb_cfg_54 = read_nb_cfg_54();
/* The node-0 NB config device initially enumerates at bus 0, device
 * CONFIG_CDB; move its bus to CONFIG_CBB so nodes get their own bus. */
1281 dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
1282 if (dev_mc && dev_mc->bus) {
1283 printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
1284 pci_domain = dev_mc->bus->dev;
1285 if (pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1286 printk(BIOS_DEBUG, "\n%s move to ",dev_path(dev_mc));
1287 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1288 printk(BIOS_DEBUG, "%s",dev_path(dev_mc));
1291 printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
1293 printk(BIOS_DEBUG, "\n");
/* Re-look-up after the bus renumbering above. */
1295 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
/* Fallback: the static devicetree may still describe the node at
 * 0:18.0; relocate it (and, below, its sibling chain) as well. */
1297 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
1298 if (dev_mc && dev_mc->bus) {
1299 printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
1300 pci_domain = dev_mc->bus->dev;
1301 if (pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1302 if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
1303 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1304 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1305 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
/* Else-branch (condition not visible here): shift each sibling's devfn
 * down by 0x18 so devices land at CONFIG_CDB-relative slots. */
1307 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1308 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
1309 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1310 dev_mc = dev_mc->sibling;
/* Final lookup; if the node-0 device is still absent we cannot scan. */
1319 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1321 printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
/* Populate the global sysconf (node count, HT topology) from node 0. */
1325 sysconf_init(dev_mc);
1327 nodes = sysconf.nodes;
1329 #if CONFIG_CBB && (NODE_NUMS > 32)
/* More than 32 nodes: nodes 32..63 go on a second domain link at bus
 * CONFIG_CBB - 1 (0xfe). */
1330 if (nodes>32) { // need to put node 32 to node 63 to bus 0xfe
1331 if (pci_domain->link_list && !pci_domain->link_list->next) {
/* BUG(review): the variable `new_link` shadows the function
 * `new_link()` inside its own initializer, so this line cannot
 * compile when NODE_NUMS > 32 -- rename the local. */
1332 struct bus *new_link = new_link(pci_domain);
1333 pci_domain->link_list->next = new_link;
1334 new_link->link_num = 1;
1335 new_link->dev = pci_domain;
1336 new_link->children = 0;
1337 printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
1339 pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
1342 /* Find which cpus are present */
1343 cpu_bus = dev->link_list;
1344 for (i = 0; i < nodes; i++) {
1345 device_t cdb_dev, cpu;
1346 struct device_path cpu_path;
1347 unsigned busn, devn;
/* Node i's NB config device sits at slot CONFIG_CDB + i. */
1351 devn = CONFIG_CDB+i;
1353 #if CONFIG_CBB && (NODE_NUMS > 32)
/* NOTE(review): stray ')' is an extraction artifact; upstream reads
 * `pbus = pci_domain->link_list->next;` inside an `if (i >= 32)`. */
1357 pbus = pci_domain->link_list->next);
1361 /* Find the cpu's pci device */
1362 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1364 /* If I am probing things in a weird order
1365 * ensure all of the cpu's pci devices are found.
/* Probe all NB functions 0..5 so later per-function lookups succeed. */
1368 for(fn = 0; fn <= 5; fn++) { //FBDIMM?
1369 cdb_dev = pci_probe_dev(NULL, pbus,
1370 PCI_DEVFN(devn, fn));
1372 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1375 /* Ok, We need to set the links for that device.
1376 * otherwise the device under it will not be scanned
1379 #if CONFIG_HT3_SUPPORT==1
1384 add_more_links(cdb_dev, linknum);
/* Core count: NB function 3, reg 0xE8 (NB Capabilities), CmpCap field
 * split across bits [13:12] and bit 15. */
1387 cores_found = 0; // one core
1388 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1389 if (cdb_dev && cdb_dev->enabled) {
1390 j = pci_read_config32(cdb_dev, 0xe8);
1391 cores_found = (j >> 12) & 3; // dev is func 3
1393 cores_found |= (j >> 13) & 4;
1394 printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(cdb_dev), cores_found);
/* presumably jj is set to cores_found, or 0 when siblings are
 * disabled -- the assignment lines are not visible here. */
1398 if (disable_siblings) {
1404 for (j = 0; j <=jj; j++ ) {
1405 extern CONST OPTIONS_CONFIG_TOPOLOGY ROMDATA TopologyConfiguration;
1406 u32 modules = TopologyConfiguration.PlatformNumberOfModules;
1407 u32 lapicid_start = 0;
1409 /* Build the cpu device path */
1410 cpu_path.type = DEVICE_PATH_APIC;
1412 * APIC ID calucation is tightly coupled with AGESA v5 code.
1413 * This calculation MUST match the assignment calculation done
1414 * in LocalApicInitializationAtEarly() function.
1415 * And reference GetLocalApicIdForCore()
1417 * Apply apic enumeration rules
1418 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
1419 * put the local-APICs at m..z
1420 * For systems with < 16 APICs, put the Local-APICs at 0..n and
1421 * put the IO-APICs at (n + 1)..z
/* 16 or more total cores: local APICs start at ID 0x10, leaving
 * 0..0xF for IO-APICs, per the rule quoted above. */
1423 if (nodes * (cores_found + 1) >= 0x10) {
1424 lapicid_start = 0x10;
1426 cpu_path.apic.apic_id = (lapicid_start * (i/modules + 1)) + ((i % modules) ? (j + (cores_found + 1)) : j);
1428 /* See if I can find the cpu */
1429 cpu = find_dev_path(cpu_bus, &cpu_path);
1431 /* Enable the cpu if I have the processor */
1432 if (cdb_dev && cdb_dev->enabled) {
1434 cpu = alloc_dev(cpu_bus, &cpu_path);
1441 /* Disable the cpu if I don't have the processor */
1442 if (cpu && (!cdb_dev || !cdb_dev->enabled)) {
1446 /* Report what I have done */
1448 cpu->path.apic.node_id = i;
1449 cpu->path.apic.core_id = j;
/* Attach the dummy lapic ops so resource passes do not touch it. */
1450 if (cpu->path.type == DEVICE_PATH_APIC) {
1451 cpu->ops = &lapic_ops;
1453 printk(BIOS_DEBUG, "CPU: %s %s\n",
1454 dev_path(cpu), cpu->enabled?"enabled":"disabled");
1462 static void cpu_bus_init(device_t dev)
1464 initialize_cpus(dev->link_list);
1467 static void cpu_bus_noop(device_t dev)
1471 static void cpu_bus_read_resources(device_t dev)
1473 #if CONFIG_MMCONF_SUPPORT
1474 struct resource *resource = new_resource(dev, 0xc0010058);
1475 resource->base = CONFIG_MMCONF_BASE_ADDRESS;
1476 resource->size = CONFIG_MMCONF_BUS_NUMBER * 4096*256;
1477 resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
1478 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
1482 static void cpu_bus_set_resources(device_t dev)
1484 struct resource *resource = find_resource(dev, 0xc0010058);
1486 report_resource_stored(dev, resource, " <mmconfig>");
1488 pci_dev_set_resources(dev);
1491 static struct device_operations cpu_bus_ops = {
1492 .read_resources = cpu_bus_read_resources,
1493 .set_resources = cpu_bus_set_resources,
1494 .enable_resources = cpu_bus_noop,
1495 .init = cpu_bus_init,
1496 .scan_bus = cpu_bus_scan,
1499 static void root_complex_enable_dev(struct device *dev)
1501 /* Set the operations if it is a special bus type */
1502 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1503 dev->ops = &pci_domain_ops;
1505 else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1506 dev->ops = &cpu_bus_ops;
1510 struct chip_operations northbridge_amd_agesa_family10_root_complex_ops = {
1511 CHIP_NAME("AMD FAM10 Root Complex")
1512 .enable_dev = root_complex_enable_dev,