2 * This file is part of the coreboot project.
4 * Copyright (C) 2007 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include <console/console.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <device/pci_ids.h>
26 #include <device/hypertransport.h>
32 #include <cpu/x86/lapic.h>
34 #if CONFIG_LOGICAL_CPUS==1
35 #include <cpu/amd/quadcore.h>
36 #include <pc80/mc146818rtc.h>
40 #include "root_complex/chip.h"
41 #include "northbridge.h"
45 #if HW_MEM_HOLE_SIZEK != 0
46 #include <cpu/amd/model_10xxx_rev.h>
49 #include <cpu/amd/amdfam10_sysconf.h>
51 struct amdfam10_sysconf_t sysconf;
53 #define FX_DEVS NODE_NUMS
54 static device_t __f0_dev[FX_DEVS];
55 static device_t __f1_dev[FX_DEVS];
56 static device_t __f2_dev[FX_DEVS];
57 static device_t __f4_dev[FX_DEVS];
59 device_t get_node_pci(u32 nodeid, u32 fn)
63 return dev_find_slot(CBB, PCI_DEVFN(CDB + nodeid, fn));
65 return dev_find_slot(CBB-1, PCI_DEVFN(CDB + nodeid - 32, fn));
69 return dev_find_slot(CBB, PCI_DEVFN(CDB + nodeid, fn));
73 static void get_fx_devs(void)
79 for(i = 0; i < FX_DEVS; i++) {
80 __f0_dev[i] = get_node_pci(i, 0);
81 __f1_dev[i] = get_node_pci(i, 1);
82 __f2_dev[i] = get_node_pci(i, 2);
83 __f4_dev[i] = get_node_pci(i, 4);
86 printk_err("Cannot find %02x:%02x.1", CBB, CDB);
87 die("Cannot go on\n");
91 static u32 f1_read_config32(u32 reg)
94 return pci_read_config32(__f1_dev[0], reg);
97 static void f1_write_config32(u32 reg, u32 value)
101 for(i = 0; i < FX_DEVS; i++) {
104 if (dev && dev->enabled) {
105 pci_write_config32(dev, reg, value);
111 static u32 amdfam10_nodeid(device_t dev)
115 busn = dev->bus->secondary;
117 return (dev->path.pci.devfn >> 3) - CDB + 32;
119 return (dev->path.pci.devfn >> 3) - CDB;
123 return (dev->path.pci.devfn >> 3) - CDB;
127 #include "amdfam10_conf.c"
129 static void set_vga_enable_reg(u32 nodeid, u32 linkn)
133 val = 1 | (nodeid<<4) | (linkn<<12);
134 /* it will routing (1)mmio 0xa0000:0xbffff (2) io 0x3b0:0x3bb,
136 f1_write_config32(0xf4, val);
/*
 * Scan one HyperTransport chain hanging off @link of node @nodeid:
 * wait for the link to come up, pick a config-map register, program the
 * bridge bus numbers, scan the chain, then record the results in sysconf.
 * Returns the highest bus number used so far.
 *
 * NOTE(review): this chunk is a lossy extract — many original lines
 * (declarations, do/for headers, braces, #else/#endif) are not visible
 * below; do not treat the text as compilable as-is.
 */
140 static u32 amdfam10_scan_chain(device_t dev, u32 nodeid, u32 link, u32 sblink,
141 u32 max, u32 offset_unitid)
143 // I want to put sb chain in bus 0 can I?
149 u32 ht_unitid_base[4]; // here assume only 4 HT devices on chain
152 u32 is_sublink1 = (link>3);
/* Ganged-link check: a ganged link has no separate sublink1 chain. */
163 regpos = 0x170 + 4 * (link&3); // it is only on sublink0
164 reg = pci_read_config32(dev, regpos);
165 if(reg & 1) return max; // already ganged no sblink1
166 devx = get_node_pci(nodeid, 4);
/* HT link capability blocks sit at 0x80/0xa0/0xc0/0xe0. */
172 dev->link[link].cap = 0x80 + ((link&3) *0x20);
174 link_type = pci_read_config32(devx, dev->link[link].cap + 0x18);
175 } while(link_type & ConnectionPending);
176 if (!(link_type & LinkConnected)) {
180 link_type = pci_read_config32(devx, dev->link[link].cap + 0x18);
181 } while(!(link_type & InitComplete));
/* Only non-coherent links carry a PCI-style device chain. */
182 if (!(link_type & NonCoherent)) {
185 /* See if there is an available configuration space mapping
186 * register in function 1.
188 ht_c_index = get_ht_c_index(nodeid, link, &sysconf);
190 #if EXT_CONF_SUPPORT == 0
191 if(ht_c_index>=4) return max;
194 /* Set up the primary, secondary and subordinate bus numbers.
195 * We have no idea how many busses are behind this bridge yet,
196 * so we set the subordinate bus number to 0xff for the moment.
199 #if SB_HT_CHAIN_ON_BUS0 > 0
200 // first chain will be on bus 0
201 if((nodeid == 0) && (sblink==link)) { // actually max is 0 here
204 #if SB_HT_CHAIN_ON_BUS0 > 1
205 // second chain will be on 0x40, third 0x80, fourth 0xc0
206 // i would refine that to 2, 3, 4 ==> 0, 0x40, 0x80, 0xc0
207 // >4 will use more segments; we can have 16 segments and every segment has 256 buses. For that case the kernel needs to support mmio pci config.
209 min_bus = ((busn>>3) + 1) << 3; // one node can have 8 links and segn is the same
211 max = min_bus | (segn<<8);
221 max_bus = 0xfc | (segn<<8);
223 dev->link[link].secondary = min_bus;
224 dev->link[link].subordinate = max_bus;
225 /* Read the existing primary/secondary/subordinate bus
226 * number configuration.
228 busses = pci_read_config32(devx, dev->link[link].cap + 0x14);
230 /* Configure the bus numbers for this bridge: the configuration
231 * transactions will not be propagated by the bridge if it is
232 * not correctly configured
234 busses &= 0xffff00ff;
235 busses |= ((u32)(dev->link[link].secondary) << 8);
236 pci_write_config32(devx, dev->link[link].cap + 0x14, busses);
239 /* set the config map space */
241 set_config_map_reg(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, sysconf.segbit, sysconf.nodes);
243 /* Now we can scan all of the subordinate busses i.e. the
244 * chain on the hypertransport link
247 ht_unitid_base[i] = 0x20;
250 //if ext conf is enabled, only need use 0x1f
252 max_devfn = (0x17<<3) | 7;
254 max_devfn = (0x1f<<3) | 7;
256 max = hypertransport_scan_chain(&dev->link[link], 0, max_devfn, max, ht_unitid_base, offset_unitid);
259 /* We know the number of busses behind this bridge. Set the
260 * subordinate bus number to its real value
262 if(ht_c_index>3) { // clear the extend reg
263 clear_config_map_reg(nodeid, link, ht_c_index, (max+1)>>sysconf.segbit, (dev->link[link].subordinate)>>sysconf.segbit, sysconf.nodes);
266 dev->link[link].subordinate = max;
267 set_config_map_reg(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, sysconf.segbit, sysconf.nodes);
/* Pack ht_unitid_base into hcdn_reg: one byte per HT device on the chain. */
271 // config config_reg, and ht_unitid_base to update hcdn_reg;
274 temp |= (ht_unitid_base[i] & 0xff) << (i*8);
277 sysconf.hcdn_reg[ht_c_index] = temp;
281 store_ht_c_conf_bus(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, &sysconf);
/*
 * Scan every HT chain on this node's northbridge device, handling the
 * southbridge chain first so it lands on bus 0.  @max is the highest
 * bus number used so far; the updated value is returned.
 * NOTE(review): lossy extract — declarations and #endif lines missing.
 */
287 static u32 amdfam10_scan_chains(device_t dev, u32 max)
291 u32 sblink = sysconf.sblk;
292 u32 offset_unitid = 0;
294 nodeid = amdfam10_nodeid(dev);
297 // Put sb chain in bus 0
298 #if SB_HT_CHAIN_ON_BUS0 > 0
300 #if ((HT_CHAIN_UNITID_BASE != 1) || (HT_CHAIN_END_UNITID_BASE != 0x20))
303 max = amdfam10_scan_chain(dev, nodeid, sblink, sblink, max, offset_unitid ); // do sb ht chain at first, in case s2885 put sb chain (8131/8111) on link2, but put 8151 on link0
308 #if PCI_BUS_SEGN_BITS
309 max = check_segn(dev, max, sysconf.nodes, &sysconf);
/* Scan the remaining (non-southbridge) links. */
313 for(link = 0; link < dev->links; link++) {
314 #if SB_HT_CHAIN_ON_BUS0 > 0
315 if( (nodeid == 0) && (sblink == link) ) continue; //already done
318 #if ((HT_CHAIN_UNITID_BASE != 1) || (HT_CHAIN_END_UNITID_BASE != 0x20))
319 #if SB_HT_CHAIN_UNITID_OFFSET_ONLY == 1
320 if((nodeid == 0) && (sblink == link))
325 max = amdfam10_scan_chain(dev, nodeid, link, sblink, max, offset_unitid);
/*
 * Decide whether function-1 mapping register pair @reg can be used for
 * (@goal_dev, @goal_nodeid, @goal_link): scan all nodes/links for an
 * existing resource keyed on it and classify the result.
 * NOTE(review): lossy extract — the final result computation is only
 * partially visible; the off-by-one look of (link - 1)/(nodeid - 1)
 * reflects the loops having post-incremented before the break.
 */
331 static int reg_useable(u32 reg,device_t goal_dev, u32 goal_nodeid,
334 struct resource *res;
338 for(nodeid = 0; !res && (nodeid < NODE_NUMS); nodeid++) {
340 dev = __f0_dev[nodeid];
343 for(link = 0; !res && (link < 8); link++) {
344 res = probe_resource(dev, 0x1000 + reg + (link<<16)); // 8 links, 0x1000 means f1
350 if ( (goal_link == (link - 1)) &&
351 (goal_nodeid == (nodeid - 1)) &&
/*
 * Find (or fall back to allocating) a function-1 I/O base/limit register
 * pair (0xc0..0xd8) for @nodeid/@link and return a resource keyed on it.
 * With extended config space a synthetic 0x110-based index is used
 * instead, so registers can never run out.
 * NOTE(review): lossy extract — the free_reg bookkeeping and some
 * braces/#if lines are not visible here.
 */
359 static struct resource *amdfam10_find_iopair(device_t dev, u32 nodeid, u32 link)
361 struct resource *resource;
365 for(reg = 0xc0; reg <= 0xd8; reg += 0x8) {
367 result = reg_useable(reg, dev, nodeid, link);
369 /* I have been allocated this one */
372 else if (result > 1) {
373 /* I have a free register pair */
378 reg = free_reg; // if no free, the free_reg still be 0
383 //because of Extended conf space, we will never run out of regs, but we need one index to tell them apart, so the same node and same link can have multiple ranges
384 u32 index = get_io_addr_index(nodeid, link);
385 reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255
388 resource = new_resource(dev, 0x1000 + reg + (link<<16));
/*
 * Memory-window counterpart of amdfam10_find_iopair(): find or allocate
 * a function-1 MMIO base/limit pair (0x80..0xb8) for @nodeid/@link, or
 * a synthetic extended-config index when the fixed pairs are exhausted.
 * NOTE(review): lossy extract — free-pair bookkeeping and braces are
 * not visible here.
 */
393 static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
395 struct resource *resource;
399 for(reg = 0x80; reg <= 0xb8; reg += 0x8) {
401 result = reg_useable(reg, dev, nodeid, link);
403 /* I have been allocated this one */
406 else if (result > 1) {
407 /* I have a free register pair */
417 //because of Extended conf space, we will never run out of regs,
418 // but we need one index to tell them apart, so the same node and
419 // same link can have multiple ranges
420 u32 index = get_mmio_addr_index(nodeid, link);
421 reg = 0x110+ (index<<24) + (6<<20); // index could be 0, 63
424 resource = new_resource(dev, 0x1000 + reg + (link<<16));
/*
 * Create the three host windows (I/O, prefetchable memory, memory) for
 * one HT link, with the alignment/granularity/limit constraints the HT
 * host requires, and pre-allocate them against the link's bus.
 * NOTE(review): lossy extract — the NULL checks after each find call,
 * some declarations and closing braces/#endif lines are missing here.
 */
429 static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
431 struct resource *resource;
433 /* Initialize the io space constraints on the current bus */
434 resource = amdfam10_find_iopair(dev, nodeid, link);
437 #if EXT_CONF_SUPPORT == 1
438 if((resource->index & 0x1fff) == 0x1110) { // ext
443 align = log2(HT_IO_HOST_ALIGN);
446 resource->align = align;
447 resource->gran = align;
448 resource->limit = 0xffffUL;
449 resource->flags = IORESOURCE_IO;
450 compute_allocate_resource(&dev->link[link], resource,
451 IORESOURCE_IO, IORESOURCE_IO);
454 /* Initialize the prefetchable memory constraints on the current bus */
455 resource = amdfam10_find_mempair(dev, nodeid, link);
459 resource->align = log2(HT_MEM_HOST_ALIGN);
460 resource->gran = log2(HT_MEM_HOST_ALIGN);
461 resource->limit = 0xffffffffffULL;
462 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
463 compute_allocate_resource(&dev->link[link], resource,
464 IORESOURCE_MEM | IORESOURCE_PREFETCH,
465 IORESOURCE_MEM | IORESOURCE_PREFETCH);
467 #if EXT_CONF_SUPPORT == 1
468 if((resource->index & 0x1fff) == 0x1110) { // ext
469 normalize_resource(resource);
475 /* Initialize the memory constraints on the current bus */
476 resource = amdfam10_find_mempair(dev, nodeid, link);
480 resource->align = log2(HT_MEM_HOST_ALIGN);
481 resource->gran = log2(HT_MEM_HOST_ALIGN);
482 resource->limit = 0xffffffffffULL;
483 resource->flags = IORESOURCE_MEM;
484 compute_allocate_resource(&dev->link[link], resource,
485 IORESOURCE_MEM | IORESOURCE_PREFETCH,
488 #if EXT_CONF_SUPPORT == 1
489 if((resource->index & 0x1fff) == 0x1110) { // ext
490 normalize_resource(resource);
498 static void amdfam10_read_resources(device_t dev)
502 nodeid = amdfam10_nodeid(dev);
503 for(link = 0; link < dev->links; link++) {
504 if (dev->link[link].children) {
505 amdfam10_link_read_bases(dev, nodeid, link);
/*
 * Program one function-1 resource (I/O or MMIO base/limit pair) into
 * hardware, record it in sysconf, and mark the resource stored.
 * NOTE(review): lossy extract — the early-return bodies, the buf
 * declaration and several braces are not visible here.
 */
511 static void amdfam10_set_resource(device_t dev, struct resource *resource,
514 resource_t rbase, rend;
518 /* Make certain the resource has actually been set */
519 if (!(resource->flags & IORESOURCE_ASSIGNED)) {
523 /* If I have already stored this resource don't worry about it */
524 if (resource->flags & IORESOURCE_STORED) {
528 /* Only handle PCI memory and IO resources */
529 if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
532 /* Ensure I am actually looking at a resource of function 1 */
533 if ((resource->index & 0xffff) < 0x1000) {
536 /* Get the base address */
537 rbase = resource->base;
539 /* Get the limit (rounded up) */
540 rend = resource_end(resource);
542 /* Get the register and link */
543 reg = resource->index & 0xfff; // 4k
544 link = ( resource->index>> 16)& 0x7; // 8 links
546 if (resource->flags & IORESOURCE_IO) {
547 compute_allocate_resource(&dev->link[link], resource,
548 IORESOURCE_IO, IORESOURCE_IO);
550 set_io_addr_reg(dev, nodeid, link, reg, rbase>>8, rend>>8);
551 store_conf_io_addr(nodeid, link, reg, (resource->index >> 24), rbase>>8, rend>>8);
553 else if (resource->flags & IORESOURCE_MEM) {
554 compute_allocate_resource(&dev->link[link], resource,
555 IORESOURCE_MEM | IORESOURCE_PREFETCH,
556 resource->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH));
557 set_mmio_addr_reg(nodeid, link, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
558 store_conf_mmio_addr(nodeid, link, reg, (resource->index >>24), rbase>>8, rend>>8);
560 resource->flags |= IORESOURCE_STORED;
561 sprintf(buf, " <node %02x link %02x>",
563 report_resource_stored(dev, resource, buf);
568 * I tried to reuse the resource allocation code in amdfam10_set_resource()
569 * but it is too difficult to deal with the resource allocation magic.
571 #if CONFIG_CONSOLE_VGA_MULTI == 1
572 extern device_t vga_pri; // the primary vga device, defined in device.c
/*
 * Route legacy VGA to the link behind which the first VGA device sits
 * (checked via PCI_BRIDGE_CTL_VGA on each link; with multi-VGA consoles
 * the primary VGA device's bus range is matched explicitly).
 * NOTE(review): lossy extract — some braces, the break/return after a
 * match and the #endif lines are not visible here.
 */
575 static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
579 /* find out which link the VGA card is connected,
580 * we only deal with the 'first' vga card */
581 for (link = 0; link < dev->links; link++) {
582 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
583 #if CONFIG_CONSOLE_VGA_MULTI == 1
584 printk_debug("VGA: vga_pri bus num = %d dev->link[link] bus range [%d,%d]\n", vga_pri->bus->secondary,
585 dev->link[link].secondary,dev->link[link].subordinate);
586 /* We need to make sure the vga_pri is under the link */
587 if((vga_pri->bus->secondary >= dev->link[link].secondary ) &&
588 (vga_pri->bus->secondary <= dev->link[link].subordinate )
595 /* no VGA card installed */
596 if (link == dev->links)
599 printk_debug("VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link);
600 set_vga_enable_reg(nodeid, link);
/*
 * set_resources hook: enable VGA routing, write every resource this
 * node owns into the f1 mapping registers, then recurse into child
 * buses.
 * NOTE(review): lossy extract — the declarations and the check for
 * bus children before assign_resources() are not visible here.
 */
603 static void amdfam10_set_resources(device_t dev)
608 /* Find the nodeid */
609 nodeid = amdfam10_nodeid(dev);
611 amdfam10_create_vga_resource(dev, nodeid);
613 /* Set each resource we have found */
614 for(i = 0; i < dev->resources; i++) {
615 amdfam10_set_resource(dev, &dev->resource[i], nodeid);
618 for(link = 0; link < dev->links; link++) {
620 bus = &dev->link[link];
622 assign_resources(bus);
628 static void amdfam10_enable_resources(device_t dev)
630 pci_dev_enable_resources(dev);
631 enable_childrens_resources(dev);
634 static void mcf0_control_init(struct device *dev)
638 static struct device_operations northbridge_operations = {
639 .read_resources = amdfam10_read_resources,
640 .set_resources = amdfam10_set_resources,
641 .enable_resources = amdfam10_enable_resources,
642 .init = mcf0_control_init,
643 .scan_bus = amdfam10_scan_chains,
649 static struct pci_driver mcf0_driver __pci_driver = {
650 .ops = &northbridge_operations,
651 .vendor = PCI_VENDOR_ID_AMD,
655 struct chip_operations northbridge_amd_amdfam10_ops = {
656 CHIP_NAME("AMD FAM10 Northbridge")
/*
 * read_resources hook for the PCI domain: reserve the f1 base/limit
 * register pairs that romstage already programmed, then publish the
 * domain-wide I/O and memory windows (subtractive pair, or per-link
 * windows when 64-bit prefetch placement is enabled).
 * NOTE(review): lossy extract — declarations, braces and the
 * #else/#endif of the CONFIG_PCI_64BIT_PREF_MEM split are missing here.
 */
660 static void pci_domain_read_resources(device_t dev)
662 struct resource *resource;
666 /* Find the already assigned resource pairs */
668 for(reg = 0x80; reg <= 0xd8; reg+= 0x08) {
670 base = f1_read_config32(reg);
671 limit = f1_read_config32(reg + 0x04);
672 /* Is this register allocated? */
673 if ((base & 3) != 0) {
674 unsigned nodeid, link;
/* Destination node bits are laid out differently for MMIO vs I/O pairs. */
676 if(reg<0xc0) { // mmio
677 nodeid = (limit & 0xf) + (base&0x30);
679 nodeid = (limit & 0xf) + ((base>>4)&0x30);
681 link = (limit >> 4) & 7;
682 dev = __f0_dev[nodeid];
684 /* Reserve the resource */
685 struct resource *resource;
686 resource = new_resource(dev, 0x1000 + reg + (link<<16));
693 /* FIXME: do we need to check extended conf space?
694 I don't believe there are that many preset values */
696 #if CONFIG_PCI_64BIT_PREF_MEM == 0
697 /* Initialize the system wide io space constraints */
698 resource = new_resource(dev, IOINDEX_SUBTRACTIVE(0, 0));
699 resource->base = 0x400;
700 resource->limit = 0xffffUL;
701 resource->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
703 /* Initialize the system wide memory resources constraints */
704 resource = new_resource(dev, IOINDEX_SUBTRACTIVE(1, 0));
705 resource->limit = 0xfcffffffffULL;
706 resource->flags = IORESOURCE_MEM | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
708 for(link=0; link<dev->links; link++) {
709 /* Initialize the system wide io space constraints */
710 resource = new_resource(dev, 0|(link<<2));
711 resource->base = 0x400;
712 resource->limit = 0xffffUL;
713 resource->flags = IORESOURCE_IO;
714 compute_allocate_resource(&dev->link[link], resource,
715 IORESOURCE_IO, IORESOURCE_IO);
717 /* Initialize the system wide prefetchable memory resources constraints */
718 resource = new_resource(dev, 1|(link<<2));
719 resource->limit = 0xfcffffffffULL;
720 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
721 compute_allocate_resource(&dev->link[link], resource,
722 IORESOURCE_MEM | IORESOURCE_PREFETCH,
723 IORESOURCE_MEM | IORESOURCE_PREFETCH);
725 /* Initialize the system wide memory resources constraints */
726 resource = new_resource(dev, 2|(link<<2));
727 resource->limit = 0xfcffffffffULL;
728 resource->flags = IORESOURCE_MEM;
729 compute_allocate_resource(&dev->link[link], resource,
730 IORESOURCE_MEM | IORESOURCE_PREFETCH,
736 static void ram_resource(device_t dev, unsigned long index,
737 resource_t basek, resource_t sizek)
739 struct resource *resource;
744 resource = new_resource(dev, index);
745 resource->base = basek << 10;
746 resource->size = sizek << 10;
747 resource->flags = IORESOURCE_MEM | IORESOURCE_CACHEABLE | \
748 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
751 static void tolm_test(void *gp, struct device *dev, struct resource *new)
753 struct resource **best_p = gp;
754 struct resource *best;
756 if (!best || (best->base > new->base)) {
762 static u32 find_pci_tolm(struct bus *bus, u32 tolm)
764 struct resource *min;
766 search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
767 if (min && tolm > min->base) {
/* Resource types the domain re-places itself when 64-bit prefetchable
 * memory placement is enabled.
 * NOTE(review): the matching #else/#endif (narrower mask) is not
 * visible in this extract. */
773 #if CONFIG_PCI_64BIT_PREF_MEM == 1
774 #define BRIDGE_IO_MASK (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH)
777 #if HW_MEM_HOLE_SIZEK != 0
779 struct hw_mem_hole_info {
780 unsigned hole_startk;
/*
 * Locate the hardware memory hole: first look for a node with the
 * DramHoleValid bit set in F1 0xf0; failing that, detect a gap between
 * consecutive nodes' DRAM base/limit ranges below 4 GiB.
 * NOTE(review): lossy extract — loop-variable declarations and closing
 * braces are not visible here.
 */
784 static struct hw_mem_hole_info get_hw_mem_hole_info(void)
786 struct hw_mem_hole_info mem_hole;
789 mem_hole.hole_startk = HW_MEM_HOLE_SIZEK;
790 mem_hole.node_id = -1;
792 for (i = 0; i < sysconf.nodes; i++) {
793 struct dram_base_mask_t d;
795 d = get_dram_base_mask(i);
796 if(!(d.mask & 1)) continue; // no memory on this node
798 hole = pci_read_config32(__f1_dev[i], 0xf0);
799 if(hole & 1) { // we found the hole
800 mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
801 mem_hole.node_id = i; // record the node No with hole
802 break; // only one hole
806 // We need to double check: a special setup may leave base and limit registers non-contiguous instead of flagging a hole; that discontinuity gives us hole_startk
807 if(mem_hole.node_id==-1) {
808 resource_t limitk_pri = 0;
809 for(i=0; i<sysconf.nodes; i++) {
810 struct dram_base_mask_t d;
811 resource_t base_k, limit_k;
812 d = get_dram_base_mask(i);
813 if(!(d.base & 1)) continue;
815 base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
816 if(base_k > 4 *1024 * 1024) break; // don't need to check further
817 if(limitk_pri != base_k) { // we found the hole
818 mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
819 mem_hole.node_id = i;
820 break; //only one hole
823 limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
824 limitk_pri = limit_k;
831 #if CONFIG_AMDMCT == 0
/*
 * Undo memory hoisting: shift the DRAM base/mask pairs of every node
 * above @i (and @i itself) down by the hole size, fix up the DCT
 * select base, and clear the hoist register (F1 0xf0) so the hole can
 * be re-created at a new location.
 * NOTE(review): lossy extract — several declarations, conditionals and
 * closing braces are not visible here.
 */
832 static void disable_hoist_memory(unsigned long hole_startk, int i)
836 struct dram_base_mask_t d;
843 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
844 struct mem_info *meminfo;
845 meminfo = &sysinfox->meminfo[i];
847 one_DCT = get_one_DCT(meminfo);
849 // 1. find which node has hole
850 // 2. change limit in that node.
851 // 3. change base and limit in later node
852 // 4. clear that node f0
854 // if there is no mem hole enabled, we need to change its base instead
856 hole_sizek = (4*1024*1024) - hole_startk;
858 for(ii=NODE_NUMS-1;ii>i;ii--) {
860 d = get_dram_base_mask(ii);
862 if(!(d.mask & 1)) continue;
864 d.base -= (hole_sizek>>9);
865 d.mask -= (hole_sizek>>9);
866 set_dram_base_mask(ii, d, sysconf.nodes);
868 if(get_DctSelHiEn(ii) & 1) {
869 sel_m = get_DctSelBaseAddr(ii);
870 sel_m -= hole_startk>>10;
871 set_DctSelBaseAddr(ii, sel_m);
875 d = get_dram_base_mask(i);
877 hoist = pci_read_config32(dev, 0xf0);
878 sel_hi_en = get_DctSelHiEn(i);
881 sel_m = get_DctSelBaseAddr(i);
885 pci_write_config32(dev, 0xf0, 0);
886 d.mask -= (hole_sizek>>9);
887 set_dram_base_mask(i, d, sysconf.nodes);
888 if(one_DCT || (sel_m >= (hole_startk>>10))) {
890 sel_m -= hole_startk>>10;
891 set_DctSelBaseAddr(i, sel_m);
895 set_DctSelBaseOffset(i, 0);
899 d.base -= (hole_sizek>>9);
900 d.mask -= (hole_sizek>>9);
901 set_dram_base_mask(i, d, sysconf.nodes);
904 sel_m -= hole_startk>>10;
905 set_DctSelBaseAddr(i, sel_m);
/* Reservation for coreboot "high tables" (ACPI/PIRQ/MP) just below the
 * MMIO hole; base/size are defined elsewhere and filled in by
 * pci_domain_set_resources() below. */
914 #if HAVE_HIGH_TABLES==1
915 #define HIGH_TABLES_SIZE 64 // maximum size of high tables in KB
916 extern uint64_t high_tables_base, high_tables_size;
/*
 * set_resources hook for the PCI domain: optionally re-place 64-bit PCI
 * memory as high as possible, derive the MMIO hole from the PCI
 * top-of-low-memory, then publish one ram_resource per node (splitting
 * around the hole and the legacy VGA range), reserve high-table space,
 * and finally assign child resources.
 * NOTE(review): lossy extract — many declarations, braces and
 * #else/#endif lines are not visible below.
 */
919 static void pci_domain_set_resources(device_t dev)
921 #if CONFIG_PCI_64BIT_PREF_MEM == 1
922 struct resource *io, *mem1, *mem2;
923 struct resource *resource, *last;
925 unsigned long mmio_basek;
929 #if HW_MEM_HOLE_SIZEK != 0
930 struct hw_mem_hole_info mem_hole;
931 u32 reset_memhole = 1;
934 #if CONFIG_PCI_64BIT_PREF_MEM == 1
936 for(link=0; link<dev->links; link++) {
937 /* Now reallocate the pci resources memory with the
938 * highest addresses I can manage.
940 mem1 = find_resource(dev, 1|(link<<2));
941 mem2 = find_resource(dev, 2|(link<<2));
943 printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
944 mem1->base, mem1->limit, mem1->size, mem1->align);
945 printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
946 mem2->base, mem2->limit, mem2->size, mem2->align);
948 /* See if both resources have roughly the same limits */
949 if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
950 ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
952 /* If so place the one with the most stringent alignment first
954 if (mem2->align > mem1->align) {
955 struct resource *tmp;
960 /* Now place the memory as high up as it will go */
961 mem2->base = resource_max(mem2);
962 mem1->limit = mem2->base - 1;
963 mem1->base = resource_max(mem1);
966 /* Place the resources as high up as they will go */
967 mem2->base = resource_max(mem2);
968 mem1->base = resource_max(mem1);
971 printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
972 mem1->base, mem1->limit, mem1->size, mem1->align);
973 printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
974 mem2->base, mem2->limit, mem2->size, mem2->align);
977 last = &dev->resource[dev->resources];
978 for(resource = &dev->resource[0]; resource < last; resource++)
980 resource->flags |= IORESOURCE_ASSIGNED;
981 resource->flags &= ~IORESOURCE_STORED;
/* BUG(review): "(resource>>2)" shifts a pointer and cannot compile;
 * given the index scheme type|(link<<2) above, this was almost
 * certainly "(resource->index >> 2)" — confirm against pristine file. */
982 link = (resource>>2) & 3;
983 compute_allocate_resource(&dev->link[link], resource,
984 BRIDGE_IO_MASK, resource->flags & BRIDGE_IO_MASK);
986 resource->flags |= IORESOURCE_STORED;
987 report_resource_stored(dev, resource, "");
992 pci_tolm = 0xffffffffUL;
993 for(link=0;link<dev->links; link++) {
994 pci_tolm = find_pci_tolm(&dev->link[link], pci_tolm);
997 #warning "FIXME handle interleaved nodes"
998 mmio_basek = pci_tolm >> 10;
999 /* Round mmio_basek to something the processor can support */
1000 mmio_basek &= ~((1 << 6) -1);
1002 #warning "FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M MMIO hole"
1003 /* Round the mmio hole to 64M */
1004 mmio_basek &= ~((64*1024) - 1);
1006 #if HW_MEM_HOLE_SIZEK != 0
1007 /* if the hw mem hole is already set in raminit stage, here we will compare
1008 * mmio_basek and hole_basek. if mmio_basek is bigger than hole_basek we will
1009 * use hole_basek as mmio_basek and we don't need to reset the hole.
1010 * otherwise We reset the hole to the mmio_basek
1013 mem_hole = get_hw_mem_hole_info();
1015 // Use hole_basek as mmio_basek, and we don't need to reset hole anymore
1016 if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
1017 mmio_basek = mem_hole.hole_startk;
1021 #if CONFIG_AMDMCT == 0
1022 //mmio_basek = 3*1024*1024; // for debug to meet boundary
1025 if(mem_hole.node_id!=-1) {
1026 /* We need to select HW_MEM_HOLE_SIZEK for raminit, it can not
1027 make hole_startk to some basek too!
1028 We need to reset our Mem Hole, because We want a bigger HOLE
1030 Before that We need to disable the mem hole first, because
1031 the memhole could already be set on i+1 instead
1033 disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id);
1036 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
1037 // We need to double check if the mmio_basek is valid for hole
1038 // setting, if it is equal to basek, we need to decrease it some
1039 resource_t basek_pri;
1040 for (i = 0; i < sysconf.nodes; i++) {
1041 struct dram_base_mask_t d;
1043 d = get_dram_base_mask(i);
1045 if(!(d.mask &1)) continue;
1047 basek = ((resource_t)(d.base & 0x1fffff00)) << 9;
1048 if(mmio_basek == (u32)basek) {
/* BUG(review): "uin32_t" is a typo — should be u32 (or uint32_t);
 * this line cannot compile as written. */
1049 mmio_basek -= (uin32_t)(basek - basek_pri); // increase mem hole size to make sure it is in the middle of the pri node
1062 for(i = 0; i < sysconf.nodes; i++) {
1063 struct dram_base_mask_t d;
1064 resource_t basek, limitk, sizek; // 4 1T
1065 d = get_dram_base_mask(i);
1067 if(!(d.mask & 1)) continue;
1068 basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lose 6 bits here
1069 limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
1070 sizek = limitk - basek;
1072 /* see if we need a hole from 0xa0000 to 0xbffff */
1073 if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
1074 ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
1076 basek = (8*64)+(16*16);
1077 sizek = limitk - ((8*64)+(16*16));
1081 // printk_debug("node %d : mmio_basek=%08x, basek=%08x, limitk=%08x\n", i, mmio_basek, basek, limitk);
1083 /* split the region to accommodate pci memory space */
1084 if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
1085 if (basek <= mmio_basek) {
1087 pre_sizek = mmio_basek - basek;
1089 ram_resource(dev, (idx | i), basek, pre_sizek);
1092 #if HAVE_HIGH_TABLES==1
1093 if (i==0 && high_tables_base==0) {
1094 /* Leave some space for ACPI, PIRQ and MP tables */
1095 high_tables_base = (mmio_basek - HIGH_TABLES_SIZE) * 1024;
1096 high_tables_size = HIGH_TABLES_SIZE * 1024;
1097 printk_debug("(split)%xK table at =%08llx\n", HIGH_TABLES_SIZE,
1102 #if CONFIG_AMDMCT == 0
1103 #if HW_MEM_HOLE_SIZEK != 0
1105 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
1106 struct mem_info *meminfo;
1107 meminfo = &sysinfox->meminfo[i];
1108 sizek += hoist_memory(mmio_basek,i, get_one_DCT(meminfo), sysconf.nodes);
1115 if ((basek + sizek) <= 4*1024*1024) {
1119 basek = 4*1024*1024;
1120 sizek -= (4*1024*1024 - mmio_basek);
1123 ram_resource(dev, (idx | i), basek, sizek);
1125 #if HAVE_HIGH_TABLES==1
1126 printk_debug("%d: mmio_basek=%08lx, basek=%08x, limitk=%08x\n",
1127 i, mmio_basek, basek, limitk);
1128 if (i==0 && high_tables_base==0) {
1129 /* Leave some space for ACPI, PIRQ and MP tables */
1130 high_tables_base = (limitk - HIGH_TABLES_SIZE) * 1024;
1131 high_tables_size = HIGH_TABLES_SIZE * 1024;
1136 for(link = 0; link < dev->links; link++) {
1138 bus = &dev->link[link];
1139 if (bus->children) {
1140 assign_resources(bus);
/*
 * scan_bus hook for the PCI domain: unmap every HT chain register (and
 * the 64 extended config-map entries per node), scan the buses under
 * each domain link, then tune HT transaction control (response PassPW)
 * on every node's function 0.
 * NOTE(review): lossy extract — declarations, braces and #endif lines
 * are not visible here.
 */
1145 static u32 pci_domain_scan_bus(device_t dev, u32 max)
1149 /* Unmap all of the HT chains */
1150 for(reg = 0xe0; reg <= 0xec; reg += 4) {
1151 f1_write_config32(reg, 0);
1153 #if EXT_CONF_SUPPORT == 1
/* Clear the extended config-map entries (F1 0x110/0x114) on each node. */
1155 for(i = 0; i< sysconf.nodes; i++) {
1157 for(index = 0; index < 64; index++) {
1158 pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
1159 pci_write_config32(__f1_dev[i], 0x114, 0);
1166 for(i=0;i<dev->links;i++) {
1167 max = pci_scan_bus(&dev->link[i], PCI_DEVFN(CDB, 0), 0xff, max);
1170 /* Tune the hypertransport transaction for best performance.
1171 * Including enabling relaxed ordering if it is safe.
1174 for(i = 0; i < FX_DEVS; i++) {
1176 f0_dev = __f0_dev[i];
1177 if (f0_dev && f0_dev->enabled) {
1179 httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
1180 httc &= ~HTTC_RSP_PASS_PW;
1181 if (!dev->link[0].disable_relaxed_ordering) {
1182 httc |= HTTC_RSP_PASS_PW;
1184 printk_spew("%s passpw: %s\n",
1186 (!dev->link[0].disable_relaxed_ordering)?
1187 "enabled":"disabled");
1188 pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
1194 static struct device_operations pci_domain_ops = {
1195 .read_resources = pci_domain_read_resources,
1196 .set_resources = pci_domain_set_resources,
1197 .enable_resources = enable_childrens_resources,
1199 .scan_bus = pci_domain_scan_bus,
1200 #if MMCONF_SUPPORT_DEFAULT
1201 .ops_pci_bus = &pci_ops_mmconf,
1203 .ops_pci_bus = &pci_cf8_conf1,
/*
 * Populate the global sysconf from the BSP's northbridge (node 0):
 * southbridge link number, node count, HT-chain bookkeeping, and the
 * extended-APIC-id / apicid-offset handling.
 * NOTE(review): lossy extract — some assignments, braces and #endif
 * lines are not visible here.
 */
1207 static void sysconf_init(device_t dev) // first node
1209 sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
1211 sysconf.ht_c_num = 0;
1213 unsigned ht_c_index;
1215 for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
1216 sysconf.ht_c_conf_bus[ht_c_index] = 0;
1219 sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1;
1220 #if CONFIG_MAX_PHYSICAL_CPUS > 8
1221 sysconf.nodes += (((pci_read_config32(dev, 0x160)>>4) & 7)<<3);
1224 sysconf.enabled_apic_ext_id = 0;
1225 sysconf.lift_bsp_apicid = 0;
1227 /* Find the bootstrap processors apicid */
1228 sysconf.bsp_apicid = lapicid();
1229 sysconf.apicid_offset = sysconf.bsp_apicid;
1231 #if (ENABLE_APIC_EXT_ID == 1)
1232 if (pci_read_config32(dev, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
1234 sysconf.enabled_apic_ext_id = 1;
1236 #if (APIC_ID_OFFSET>0)
1237 if(sysconf.enabled_apic_ext_id) {
1238 if(sysconf.bsp_apicid == 0) {
1239 /* bsp apic id is not changed */
1240 sysconf.apicid_offset = APIC_ID_OFFSET;
1242 sysconf.lift_bsp_apicid = 1;
/*
 * scan_bus hook for the APIC cluster: relocate the node-0 northbridge
 * to bus CBB if the devicetree placed it at 0:18.0, initialize sysconf,
 * then for every node probe its PCI functions, read the core count from
 * function 3 (0xe8[13:12]) and create/enable or disable one APIC device
 * per core, applying the extended-APIC-id offset where configured.
 * NOTE(review): lossy extract — many declarations, else-branches,
 * braces and #endif lines are not visible below.
 */
1251 static u32 cpu_bus_scan(device_t dev, u32 max)
1253 struct bus *cpu_bus;
1255 device_t pci_domain;
1261 int disable_siblings;
1262 unsigned ApicIdCoreIdSize;
/* CPUID 8000_0008 ECX[15:12]: ApicIdCoreIdSize; 0 means legacy quad-core. */
1265 ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
1266 if(ApicIdCoreIdSize) {
1267 siblings = (1<<ApicIdCoreIdSize)-1;
1269 siblings = 3; //quad core
1272 disable_siblings = !CONFIG_LOGICAL_CPUS;
1273 #if CONFIG_LOGICAL_CPUS == 1
1274 get_option(&disable_siblings, "quad_core");
1277 // for pre_e0, nb_cfg_54 can not be set, ( even set, when you read it
1279 // How can I get the nb_cfg_54 of every node' nb_cfg_54 in bsp???
1280 // and differ d0 and e0 single core
1282 nb_cfg_54 = read_nb_cfg_54();
/* Move the node-0 device from bus 0 to bus CBB if needed. */
1285 dev_mc = dev_find_slot(0, PCI_DEVFN(CDB, 0)); //0x00
1286 if(dev_mc && dev_mc->bus) {
1287 printk_debug("%s found", dev_path(dev_mc));
1288 pci_domain = dev_mc->bus->dev;
1289 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1290 printk_debug("\n%s move to ",dev_path(dev_mc));
1291 dev_mc->bus->secondary = CBB; // move to 0xff
1292 printk_debug("%s",dev_path(dev_mc));
1295 printk_debug(" but it is not under pci_domain directly ");
1300 dev_mc = dev_find_slot(CBB, PCI_DEVFN(CDB, 0));
1302 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
1303 if (dev_mc && dev_mc->bus) {
1304 printk_debug("%s found\n", dev_path(dev_mc));
1305 pci_domain = dev_mc->bus->dev;
1306 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1307 if((pci_domain->links==1) && (pci_domain->link[0].children == dev_mc)) {
1308 printk_debug("%s move to ",dev_path(dev_mc));
1309 dev_mc->bus->secondary = CBB; // move to 0xff
1310 printk_debug("%s\n",dev_path(dev_mc));
1312 printk_debug("%s move to ",dev_path(dev_mc));
1313 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
1314 printk_debug("%s\n",dev_path(dev_mc));
1315 dev_mc = dev_mc->sibling;
1324 dev_mc = dev_find_slot(CBB, PCI_DEVFN(CDB, 0));
1326 printk_err("%02x:%02x.0 not found", CBB, CDB);
1330 sysconf_init(dev_mc);
1332 nodes = sysconf.nodes;
1334 #if CBB && (NODE_NUMS > 32)
1335 if(nodes>32) { // need to put node 32 to node 63 to bus 0xfe
1336 if(pci_domain->links==1) {
1337 pci_domain->links++; // from 1 to 2
1338 pci_domain->link[1].link = 1;
1339 pci_domain->link[1].dev = pci_domain;
1340 pci_domain->link[1].children = 0;
1341 printk_debug("%s links increase to %d\n", dev_path(pci_domain), pci_domain->links);
1343 pci_domain->link[1].secondary = CBB - 1;
1346 /* Find which cpus are present */
1347 cpu_bus = &dev->link[0];
1348 for(i = 0; i < nodes; i++) {
1350 struct device_path cpu_path;
1351 unsigned busn, devn;
1357 #if CBB && (NODE_NUMS > 32)
1361 pbus = &(pci_domain->link[1]);
1365 /* Find the cpu's pci device */
1366 dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1368 /* If I am probing things in a weird order
1369 * ensure all of the cpu's pci devices are found.
1372 for(j = 0; j <= 5; j++) { //FBDIMM?
1373 dev = pci_probe_dev(NULL, pbus,
1374 PCI_DEVFN(devn, j));
1376 dev = dev_find_slot(busn, PCI_DEVFN(devn,0));
1379 /* Ok, We need to set the links for that device.
1380 * otherwise the device under it will not be scanned
1389 if(dev->links < linknum) {
1390 for(j=dev->links; j<linknum; j++) {
1391 dev->link[j].link = j;
1392 dev->link[j].dev = dev;
1394 dev->links = linknum;
1395 printk_debug("%s links increase to %d\n", dev_path(dev), dev->links);
1399 cores_found = 0; // one core
1400 dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1401 if (dev && dev->enabled) {
1402 j = pci_read_config32(dev, 0xe8);
1403 cores_found = (j >> 12) & 3; // dev is func 3
1404 printk_debug(" %s siblings=%d\n", dev_path(dev), cores_found);
1408 if(disable_siblings) {
1415 for (j = 0; j <=jj; j++ ) {
1417 /* Build the cpu device path */
1418 cpu_path.type = DEVICE_PATH_APIC;
1419 cpu_path.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:64); // ?
1421 /* See if I can find the cpu */
1422 cpu = find_dev_path(cpu_bus, &cpu_path);
1424 /* Enable the cpu if I have the processor */
1425 if (dev && dev->enabled) {
1427 cpu = alloc_dev(cpu_bus, &cpu_path);
1434 /* Disable the cpu if I don't have the processor */
1435 if (cpu && (!dev || !dev->enabled)) {
1439 /* Report what I have done */
1441 cpu->path.apic.node_id = i;
1442 cpu->path.apic.core_id = j;
1443 #if (ENABLE_APIC_EXT_ID == 1) && (APIC_ID_OFFSET>0)
1444 if(sysconf.enabled_apic_ext_id) {
1445 if(sysconf.lift_bsp_apicid) {
1446 cpu->path.apic.apic_id += sysconf.apicid_offset;
1449 if (cpu->path.apic.apic_id != 0)
1450 cpu->path.apic.apic_id += sysconf.apicid_offset;
1454 printk_debug("CPU: %s %s\n",
1455 dev_path(cpu), cpu->enabled?"enabled":"disabled");
1464 static void cpu_bus_init(device_t dev)
1466 initialize_cpus(&dev->link[0]);
1470 static void cpu_bus_noop(device_t dev)
1475 static struct device_operations cpu_bus_ops = {
1476 .read_resources = cpu_bus_noop,
1477 .set_resources = cpu_bus_noop,
1478 .enable_resources = cpu_bus_noop,
1479 .init = cpu_bus_init,
1480 .scan_bus = cpu_bus_scan,
1484 static void root_complex_enable_dev(struct device *dev)
1486 /* Set the operations if it is a special bus type */
1487 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1488 dev->ops = &pci_domain_ops;
1490 else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1491 dev->ops = &cpu_bus_ops;
1495 struct chip_operations northbridge_amd_amdfam10_root_complex_ops = {
1496 CHIP_NAME("AMD FAM10 Root Complex")
1497 .enable_dev = root_complex_enable_dev,