2 * This file is part of the coreboot project.
4 * Copyright (C) 2007 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include <console/console.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <device/pci_ids.h>
26 #include <device/hypertransport.h>
32 #include <cpu/x86/lapic.h>
34 #if CONFIG_LOGICAL_CPUS==1
35 #include <cpu/amd/quadcore.h>
36 #include <pc80/mc146818rtc.h>
40 #include "root_complex/chip.h"
41 #include "northbridge.h"
45 #if HW_MEM_HOLE_SIZEK != 0
46 #include <cpu/amd/model_10xxx_rev.h>
49 #include <cpu/amd/amdfam10_sysconf.h>
/* Global system configuration block shared by the fam10 northbridge code
 * (node count, sublink, HT chain bookkeeping — see amdfam10_sysconf_t). */
51 struct amdfam10_sysconf_t sysconf;
/* Per-node caches of the northbridge PCI devices for functions 0/1/2/4,
 * filled in by get_fx_devs(). FX_DEVS is one slot per possible node. */
53 #define FX_DEVS NODE_NUMS
54 static device_t __f0_dev[FX_DEVS];
55 static device_t __f1_dev[FX_DEVS];
56 static device_t __f2_dev[FX_DEVS];
57 static device_t __f4_dev[FX_DEVS];
/* Return the PCI device for northbridge function 'fn' of node 'nodeid'.
 * Nodes live at CBB:(CDB+nodeid); the CBB-1 path handles nodes >= 32.
 * NOTE(review): this extract is missing the conditionals that select
 * between the three returns — confirm against the full source. */
59 device_t get_node_pci(u32 nodeid, u32 fn)
63 return dev_find_slot(CBB, PCI_DEVFN(CDB + nodeid, fn));
65 return dev_find_slot(CBB-1, PCI_DEVFN(CDB + nodeid - 32, fn));
69 return dev_find_slot(CBB, PCI_DEVFN(CDB + nodeid, fn));
/* Populate the __f0/__f1/__f2/__f4 device caches for every node slot.
 * Dies if the function-1 device of the first node cannot be found, since
 * all f1 config-map accesses depend on it. */
73 static void get_fx_devs(void)
79 for(i = 0; i < FX_DEVS; i++) {
80 __f0_dev[i] = get_node_pci(i, 0);
81 __f1_dev[i] = get_node_pci(i, 1);
82 __f2_dev[i] = get_node_pci(i, 2);
83 __f4_dev[i] = get_node_pci(i, 4);
/* Fatal: without CBB:CDB.1 the routing registers cannot be programmed. */
86 printk_err("Cannot find %02x:%02x.1", CBB, CDB);
87 die("Cannot go on\n");
/* Read a function-1 config register from node 0 (all nodes are expected to
 * mirror the routing registers, so node 0 is representative). */
91 static u32 f1_read_config32(u32 reg)
94 return pci_read_config32(__f1_dev[0], reg);
/* Broadcast a function-1 config register write to every enabled node, so
 * the routing registers stay identical across the fabric. */
97 static void f1_write_config32(u32 reg, u32 value)
101 for(i = 0; i < FX_DEVS; i++) {
104 if (dev && dev->enabled) {
105 pci_write_config32(dev, reg, value);
/* Derive the HT node id of a northbridge device from its PCI devfn
 * (device number minus CDB). The "+ 32" variant covers nodes that were
 * moved to the secondary bus (CBB-1).
 * NOTE(review): the bus-number tests choosing between these returns are
 * missing from this extract — confirm against the full source. */
111 static u32 amdfam10_nodeid(device_t dev)
115 busn = dev->bus->secondary;
117 return (dev->path.pci.devfn >> 3) - CDB + 32;
119 return (dev->path.pci.devfn >> 3) - CDB;
123 return (dev->path.pci.devfn >> 3) - CDB;
127 #include "amdfam10_conf.c"
/* Program the VGA enable register (f1 0xf4) to route legacy VGA ranges
 * to the given node/link. Bit 0 enables, nodeid in [7:4], link in [14:12]. */
129 static void set_vga_enable_reg(u32 nodeid, u32 linkn)
133 val = 1 | (nodeid<<4) | (linkn<<12);
134 /* it will route (1) mmio 0xa0000:0xbffff (2) io 0x3b0:0x3bb, */
136 f1_write_config32(0xf4, val);
/* Scan one HyperTransport chain hanging off node 'nodeid', link 'link'.
 * Assigns bus numbers, programs the config-map registers, scans the chain
 * and records unit-id / hcdn information into sysconf. Returns the new
 * highest bus number ('max').
 * NOTE(review): this extract is missing many intermediate lines (loop
 * heads, braces, early-exit paths); code left byte-identical. */
140 static u32 amdfam10_scan_chain(device_t dev, u32 nodeid, u32 link, u32 sblink,
141 u32 max, u32 offset_unitid)
143 // I want to put sb chain in bus 0 can I?
149 u32 ht_unitid_base[4]; // here assume only 4 HT device on chain
152 u32 is_sublink1 = (link>3);
/* Sublink-1 handling: a ganged link has no separate sublink 1. */
163 regpos = 0x170 + 4 * (link&3); // it is only on sublink0
164 reg = pci_read_config32(dev, regpos);
165 if(reg & 1) return max; // already ganged no sblink1
166 devx = get_node_pci(nodeid, 4);
/* Each link's HT capability block lives at 0x80 + 0x20*link. */
172 dev->link[link].cap = 0x80 + ((link&3) *0x20);
/* Wait for the link to settle, then require a connected,
 * initialized, non-coherent link before scanning. */
174 link_type = pci_read_config32(devx, dev->link[link].cap + 0x18);
175 } while(link_type & ConnectionPending);
176 if (!(link_type & LinkConnected)) {
180 link_type = pci_read_config32(devx, dev->link[link].cap + 0x18);
181 } while(!(link_type & InitComplete));
182 if (!(link_type & NonCoherent)) {
185 /* See if there is an available configuration space mapping
186 * register in function 1.
188 ht_c_index = get_ht_c_index(nodeid, link, &sysconf);
/* Without extended config support only 4 chain map registers exist. */
190 #if EXT_CONF_SUPPORT == 0
191 if(ht_c_index>=4) return max;
194 /* Set up the primary, secondary and subordinate bus numbers.
195 * We have no idea how many busses are behind this bridge yet,
196 * so we set the subordinate bus number to 0xff for the moment.
199 #if SB_HT_CHAIN_ON_BUS0 > 0
200 // first chain will on bus 0
201 if((nodeid == 0) && (sblink==link)) { // actually max is 0 here
204 #if SB_HT_CHAIN_ON_BUS0 > 1
205 // second chain will be on 0x40, third 0x80, forth 0xc0
206 // i would refined that to 2, 3, 4 ==> 0, 0x, 40, 0x80, 0xc0
207 // >4 will use more segments, We can have 16 segmment and every segment have 256 bus, For that case need the kernel support mmio pci config.
209 min_bus = ((busn>>3) + 1) << 3; // one node can have 8 link and segn is the same
211 max = min_bus | (segn<<8);
221 max_bus = 0xfc | (segn<<8);
223 dev->link[link].secondary = min_bus;
224 dev->link[link].subordinate = max_bus;
225 /* Read the existing primary/secondary/subordinate bus
226 * number configuration.
228 busses = pci_read_config32(devx, dev->link[link].cap + 0x14);
230 /* Configure the bus numbers for this bridge: the configuration
231 * transactions will not be propagates by the bridge if it is
232 * not correctly configured
234 busses &= 0xffff00ff;
235 busses |= ((u32)(dev->link[link].secondary) << 8);
236 pci_write_config32(devx, dev->link[link].cap + 0x14, busses);
239 /* set the config map space */
241 set_config_map_reg(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, sysconf.segbit, sysconf.nodes);
243 /* Now we can scan all of the subordinate busses i.e. the
244 * chain on the hypertranport link
247 ht_unitid_base[i] = 0x20;
250 //if ext conf is enabled, only need use 0x1f
252 max_devfn = (0x17<<3) | 7;
254 max_devfn = (0x1f<<3) | 7;
256 max = hypertransport_scan_chain(&dev->link[link], 0, max_devfn, max, ht_unitid_base, offset_unitid);
259 /* We know the number of busses behind this bridge. Set the
260 * subordinate bus number to it's real value
262 if(ht_c_index>3) { // clear the extend reg
263 clear_config_map_reg(nodeid, link, ht_c_index, (max+1)>>sysconf.segbit, (dev->link[link].subordinate)>>sysconf.segbit, sysconf.nodes);
266 dev->link[link].subordinate = max;
267 set_config_map_reg(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, sysconf.segbit, sysconf.nodes);
/* Pack the discovered unit-id bases into one register-shaped word. */
271 // config config_reg, and ht_unitid_base to update hcdn_reg;
274 temp |= (ht_unitid_base[i] & 0xff) << (i*8);
277 sysconf.hcdn_reg[ht_c_index] = temp;
281 store_ht_c_conf_bus(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, &sysconf);
/* Scan every HT chain on this node. The southbridge chain (node 0,
 * sysconf.sblk) is scanned first so it lands on bus 0, then the
 * remaining links are walked, skipping the already-done SB chain. */
287 static u32 amdfam10_scan_chains(device_t dev, u32 max)
291 u32 sblink = sysconf.sblk;
292 u32 offset_unitid = 0;
294 nodeid = amdfam10_nodeid(dev);
297 // Put sb chain in bus 0
298 #if SB_HT_CHAIN_ON_BUS0 > 0
300 #if ((HT_CHAIN_UNITID_BASE != 1) || (HT_CHAIN_END_UNITID_BASE != 0x20))
303 max = amdfam10_scan_chain(dev, nodeid, sblink, sblink, max, offset_unitid ); // do sb ht chain at first, in case s2885 put sb chain (8131/8111) on link2, but put 8151 on link0
308 #if PCI_BUS_SEGN_BITS
309 max = check_segn(dev, max, sysconf.nodes, &sysconf);
313 for(link = 0; link < dev->links; link++) {
314 #if SB_HT_CHAIN_ON_BUS0 > 0
315 if( (nodeid == 0) && (sblink == link) ) continue; //already done
318 #if ((HT_CHAIN_UNITID_BASE != 1) || (HT_CHAIN_END_UNITID_BASE != 0x20))
319 #if SB_HT_CHAIN_UNITID_OFFSET_ONLY == 1
320 if((nodeid == 0) && (sblink == link))
325 max = amdfam10_scan_chain(dev, nodeid, link, sblink, max, offset_unitid);
/* Check whether routing register pair 'reg' is free or already owned by
 * (goal_dev, goal_nodeid, goal_link). Scans all nodes/links for a resource
 * at index 0x1000 + reg + (link<<16).
 * NOTE(review): return-value encoding (free vs. owned) is not fully
 * visible here — callers treat >1 as "free pair"; confirm in full source. */
331 static int reg_useable(u32 reg,device_t goal_dev, u32 goal_nodeid,
334 struct resource *res;
338 for(nodeid = 0; !res && (nodeid < NODE_NUMS); nodeid++) {
340 dev = __f0_dev[nodeid];
343 for(link = 0; !res && (link < 8); link++) {
344 res = probe_resource(dev, 0x1000 + reg + (link<<16)); // 8 links, 0x1000 man f1,
/* nodeid/link were post-incremented by the loops, hence the -1. */
350 if ( (goal_link == (link - 1)) &&
351 (goal_nodeid == (nodeid - 1)) &&
/* Find (or allocate) an I/O base/limit register pair for nodeid/link and
 * return a resource tracking it. Legacy pairs live at f1 0xc0-0xd8; with
 * extended config space an indexed 0x110 register is used instead. */
359 static struct resource *amdfam10_find_iopair(device_t dev, u32 nodeid, u32 link)
361 struct resource *resource;
365 for(reg = 0xc0; reg <= 0xd8; reg += 0x8) {
367 result = reg_useable(reg, dev, nodeid, link);
369 /* I have been allocated this one */
372 else if (result > 1) {
373 /* I have a free register pair */
378 reg = free_reg; // if no free, the free_reg still be 0
383 //because of Extend conf space, we will never run out of reg, but we need one index to differ them. so same node and same link can have multi range
384 u32 index = get_io_addr_index(nodeid, link);
385 reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255
388 resource = new_resource(dev, 0x1000 + reg + (link<<16));
/* Find (or allocate) an MMIO base/limit register pair for nodeid/link,
 * mirroring amdfam10_find_iopair() but over the 0x80-0xb8 MMIO pairs
 * (extended-config index uses selector 6 instead of 4). */
393 static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
395 struct resource *resource;
399 for(reg = 0x80; reg <= 0xb8; reg += 0x8) {
401 result = reg_useable(reg, dev, nodeid, link);
403 /* I have been allocated this one */
406 else if (result > 1) {
407 /* I have a free register pair */
417 //because of Extend conf space, we will never run out of reg,
418 // but we need one index to differ them. so same node and
419 // same link can have multi range
420 u32 index = get_mmio_addr_index(nodeid, link);
421 reg = 0x110+ (index<<24) + (6<<20); // index could be 0, 63
424 resource = new_resource(dev, 0x1000 + reg + (link<<16));
/* Create the three per-link host resources for one HT link: an I/O window,
 * a prefetchable MMIO window and a non-prefetchable MMIO window, each
 * aligned to the HT host alignment and pre-allocated on the link's bus. */
429 static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
431 struct resource *resource;
433 /* Initialize the io space constraints on the current bus */
434 resource = amdfam10_find_iopair(dev, nodeid, link);
437 #if EXT_CONF_SUPPORT == 1
438 if((resource->index & 0x1fff) == 0x1110) { // ext
443 align = log2(HT_IO_HOST_ALIGN);
446 resource->align = align;
447 resource->gran = align;
448 resource->limit = 0xffffUL;
449 resource->flags = IORESOURCE_IO;
450 compute_allocate_resource(&dev->link[link], resource,
451 IORESOURCE_IO, IORESOURCE_IO);
454 /* Initialize the prefetchable memory constraints on the current bus */
455 resource = amdfam10_find_mempair(dev, nodeid, link);
/* 40-bit limit: fam10 supports a 40-bit physical MMIO space. */
459 resource->align = log2(HT_MEM_HOST_ALIGN);
460 resource->gran = log2(HT_MEM_HOST_ALIGN);
461 resource->limit = 0xffffffffffULL;
462 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
463 compute_allocate_resource(&dev->link[link], resource,
464 IORESOURCE_MEM | IORESOURCE_PREFETCH,
465 IORESOURCE_MEM | IORESOURCE_PREFETCH);
467 #if EXT_CONF_SUPPORT == 1
468 if((resource->index & 0x1fff) == 0x1110) { // ext
469 normalize_resource(resource);
475 /* Initialize the memory constraints on the current bus */
476 resource = amdfam10_find_mempair(dev, nodeid, link);
480 resource->align = log2(HT_MEM_HOST_ALIGN);
481 resource->gran = log2(HT_MEM_HOST_ALIGN);
482 resource->limit = 0xffffffffffULL;
483 resource->flags = IORESOURCE_MEM;
484 compute_allocate_resource(&dev->link[link], resource,
485 IORESOURCE_MEM | IORESOURCE_PREFETCH,
488 #if EXT_CONF_SUPPORT == 1
489 if((resource->index & 0x1fff) == 0x1110) { // ext
490 normalize_resource(resource);
/* read_resources hook: set up host windows for every link that actually
 * has child devices behind it. */
498 static void amdfam10_read_resources(device_t dev)
502 nodeid = amdfam10_nodeid(dev);
503 for(link = 0; link < dev->links; link++) {
504 if (dev->link[link].children) {
505 amdfam10_link_read_bases(dev, nodeid, link);
/* Commit one previously-allocated f1 routing resource to hardware:
 * program the I/O or MMIO base/limit pair encoded in resource->index
 * (register in bits [11:0], link in [18:16], ext index in [31:24]). */
511 static void amdfam10_set_resource(device_t dev, struct resource *resource,
514 resource_t rbase, rend;
518 /* Make certain the resource has actually been set */
519 if (!(resource->flags & IORESOURCE_ASSIGNED)) {
523 /* If I have already stored this resource don't worry about it */
524 if (resource->flags & IORESOURCE_STORED) {
528 /* Only handle PCI memory and IO resources */
529 if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
532 /* Ensure I am actually looking at a resource of function 1 */
533 if ((resource->index & 0xffff) < 0x1000) {
536 /* Get the base address */
537 rbase = resource->base;
539 /* Get the limit (rounded up) */
540 rend = resource_end(resource);
542 /* Get the register and link */
543 reg = resource->index & 0xfff; // 4k
544 link = ( resource->index>> 16)& 0x7; // 8 links
546 if (resource->flags & IORESOURCE_IO) {
547 compute_allocate_resource(&dev->link[link], resource,
548 IORESOURCE_IO, IORESOURCE_IO);
/* Registers take addresses in 256-byte granularity, hence >>8. */
550 set_io_addr_reg(dev, nodeid, link, reg, rbase>>8, rend>>8);
551 store_conf_io_addr(nodeid, link, reg, (resource->index >> 24), rbase>>8, rend>>8);
553 else if (resource->flags & IORESOURCE_MEM) {
554 compute_allocate_resource(&dev->link[link], resource,
555 IORESOURCE_MEM | IORESOURCE_PREFETCH,
556 resource->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH));
557 set_mmio_addr_reg(nodeid, link, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
558 store_conf_mmio_addr(nodeid, link, reg, (resource->index >>24), rbase>>8, rend>>8);
560 resource->flags |= IORESOURCE_STORED;
561 sprintf(buf, " <node %02x link %02x>",
563 report_resource_stored(dev, resource, buf);
/*
568 * I tried to reuse the resource allocation code in amdfam10_set_resource()
569 * but it is too diffcult to deal with the resource allocation magic.
 */
571 #if CONFIG_CONSOLE_VGA_MULTI == 1
572 extern device_t vga_pri; // the primary vga device, defined in device.c
/* Find the first link carrying a VGA device and enable legacy VGA routing
 * to it via set_vga_enable_reg(); does nothing if no link claims VGA. */
575 static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
579 /* find out which link the VGA card is connected,
580 * we only deal with the 'first' vga card */
581 for (link = 0; link < dev->links; link++) {
582 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
583 #if CONFIG_CONSOLE_VGA_MULTI == 1
584 printk_debug("VGA: vga_pri bus num = %d dev->link[link] bus range [%d,%d]\n", vga_pri->bus->secondary,
585 dev->link[link].secondary,dev->link[link].subordinate);
586 /* We need to make sure the vga_pri is under the link */
587 if((vga_pri->bus->secondary >= dev->link[link].secondary ) &&
588 (vga_pri->bus->secondary <= dev->link[link].subordinate )
595 /* no VGA card installed */
596 if (link == dev->links)
599 printk_debug("VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link);
600 set_vga_enable_reg(nodeid, link);
/* set_resources hook: route VGA, write every stored f1 resource to the
 * hardware, then recurse into the children of each link. */
603 static void amdfam10_set_resources(device_t dev)
608 /* Find the nodeid */
609 nodeid = amdfam10_nodeid(dev);
611 amdfam10_create_vga_resource(dev, nodeid);
613 /* Set each resource we have found */
614 for(i = 0; i < dev->resources; i++) {
615 amdfam10_set_resource(dev, &dev->resource[i], nodeid);
618 for(link = 0; link < dev->links; link++) {
620 bus = &dev->link[link];
622 assign_resources(bus);
/* enable_resources hook: standard PCI enable plus children. */
628 static void amdfam10_enable_resources(device_t dev)
630 pci_dev_enable_resources(dev);
631 enable_childrens_resources(dev);
/* init hook for northbridge function 0; body not visible in this extract. */
634 static void mcf0_control_init(struct device *dev)
/* Device operations for the fam10 northbridge function 0 (mcf0), bound to
 * the AMD vendor id by the PCI driver entry below. */
638 static struct device_operations northbridge_operations = {
639 .read_resources = amdfam10_read_resources,
640 .set_resources = amdfam10_set_resources,
641 .enable_resources = amdfam10_enable_resources,
642 .init = mcf0_control_init,
643 .scan_bus = amdfam10_scan_chains,
/* PCI driver registration: match AMD northbridge function 0. */
649 static struct pci_driver mcf0_driver __pci_driver = {
650 .ops = &northbridge_operations,
651 .vendor = PCI_VENDOR_ID_AMD,
/* Chip operations entry for devicetree binding. */
655 struct chip_operations northbridge_amd_amdfam10_ops = {
656 CHIP_NAME("AMD FAM10 Northbridge")
/* Domain read_resources hook: reserve routing pairs already programmed by
 * earlier boot stages, then create the system-wide I/O and memory
 * constraint resources (per link when 64-bit pref-mem is enabled). */
660 static void pci_domain_read_resources(device_t dev)
662 struct resource *resource;
666 /* Find the already assigned resource pairs */
668 for(reg = 0x80; reg <= 0xd8; reg+= 0x08) {
670 base = f1_read_config32(reg);
671 limit = f1_read_config32(reg + 0x04);
672 /* Is this register allocated? */
673 if ((base & 3) != 0) {
674 unsigned nodeid, link;
/* Destination node/link are encoded differently for MMIO (<0xc0)
 * and I/O pairs. */
676 if(reg<0xc0) { // mmio
677 nodeid = (limit & 0xf) + (base&0x30);
679 nodeid = (limit & 0xf) + ((base>>4)&0x30);
681 link = (limit >> 4) & 7;
682 dev = __f0_dev[nodeid];
684 /* Reserve the resource */
685 struct resource *resource;
686 resource = new_resource(dev, 0x1000 + reg + (link<<16));
693 /* FIXME: do we need to check extend conf space?
694 I don't believe that much preset value */
696 #if CONFIG_PCI_64BIT_PREF_MEM == 0
697 /* Initialize the system wide io space constraints */
698 resource = new_resource(dev, IOINDEX_SUBTRACTIVE(0, 0));
699 resource->base = 0x400;
700 resource->limit = 0xffffUL;
701 resource->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
703 /* Initialize the system wide memory resources constraints */
704 resource = new_resource(dev, IOINDEX_SUBTRACTIVE(1, 0));
705 resource->limit = 0xfcffffffffULL;
706 resource->flags = IORESOURCE_MEM | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
708 for(link=0; link<dev->links; link++) {
709 /* Initialize the system wide io space constraints */
710 resource = new_resource(dev, 0|(link<<2));
711 resource->base = 0x400;
712 resource->limit = 0xffffUL;
713 resource->flags = IORESOURCE_IO;
714 compute_allocate_resource(&dev->link[link], resource,
715 IORESOURCE_IO, IORESOURCE_IO);
717 /* Initialize the system wide prefetchable memory resources constraints */
718 resource = new_resource(dev, 1|(link<<2));
719 resource->limit = 0xfcffffffffULL;
720 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
721 compute_allocate_resource(&dev->link[link], resource,
722 IORESOURCE_MEM | IORESOURCE_PREFETCH,
723 IORESOURCE_MEM | IORESOURCE_PREFETCH);
725 /* Initialize the system wide memory resources constraints */
726 resource = new_resource(dev, 2|(link<<2));
727 resource->limit = 0xfcffffffffULL;
728 resource->flags = IORESOURCE_MEM;
729 compute_allocate_resource(&dev->link[link], resource,
730 IORESOURCE_MEM | IORESOURCE_PREFETCH,
/* Register a fixed, cacheable RAM range with the resource allocator.
 * basek/sizek are in KiB (shifted to bytes here). */
736 static void ram_resource(device_t dev, unsigned long index,
737 resource_t basek, resource_t sizek)
739 struct resource *resource;
744 resource = new_resource(dev, index);
745 resource->base = basek << 10;
746 resource->size = sizek << 10;
747 resource->flags = IORESOURCE_MEM | IORESOURCE_CACHEABLE | \
748 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
/* search_bus_resources() callback: track the lowest-based MEM resource
 * seen so far via the resource pointer passed in 'gp'. */
751 static void tolm_test(void *gp, struct device *dev, struct resource *new)
753 struct resource **best_p = gp;
754 struct resource *best;
756 if (!best || (best->base > new->base)) {
/* Compute the top of low PCI memory for a bus: the lowest MEM resource
 * base found below the current 'tolm' candidate. */
762 static u32 find_pci_tolm(struct bus *bus, u32 tolm)
764 struct resource *min;
766 search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
767 if (min && tolm > min->base) {
773 #if CONFIG_PCI_64BIT_PREF_MEM == 1
774 #define BRIDGE_IO_MASK (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH)
777 #if HW_MEM_HOLE_SIZEK != 0
779 struct hw_mem_hole_info {
780 unsigned hole_startk;
784 static struct hw_mem_hole_info get_hw_mem_hole_info(void)
786 struct hw_mem_hole_info mem_hole;
789 mem_hole.hole_startk = HW_MEM_HOLE_SIZEK;
790 mem_hole.node_id = -1;
792 for (i = 0; i < sysconf.nodes; i++) {
793 struct dram_base_mask_t d;
795 d = get_dram_base_mask(i);
796 if(!(d.mask & 1)) continue; // no memory on this node
798 hole = pci_read_config32(__f1_dev[i], 0xf0);
799 if(hole & 1) { // we find the hole
800 mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
801 mem_hole.node_id = i; // record the node No with hole
802 break; // only one hole
806 //We need to double check if there is speical set on base reg and limit reg are not continous instead of hole, it will find out it's hole_startk
807 if(mem_hole.node_id==-1) {
808 resource_t limitk_pri = 0;
809 for(i=0; i<sysconf.nodes; i++) {
810 struct dram_base_mask_t d;
811 resource_t base_k, limit_k;
812 d = get_dram_base_mask(i);
813 if(!(d.base & 1)) continue;
815 base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
816 if(base_k > 4 *1024 * 1024) break; // don't need to go to check
817 if(limitk_pri != base_k) { // we find the hole
818 mem_hole.hole_startk = (unsigned)limitk_pri; // must beblow 4G
819 mem_hole.node_id = i;
820 break; //only one hole
823 limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
824 limitk_pri = limit_k;
831 #if CONFIG_AMDMCT == 0
/* Undo a raminit memory hoist: clear the hoist register on node i,
 * shrink its DRAM range by the hole size and shift every later node's
 * base/mask (and DCT select base) down accordingly.
 * NOTE(review): several branches of this function are missing from the
 * extract; code left byte-identical. */
832 static void disable_hoist_memory(unsigned long hole_startk, int i)
836 struct dram_base_mask_t d;
843 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
844 struct mem_info *meminfo;
845 meminfo = &sysinfox->meminfo[i];
847 one_DCT = get_one_DCT(meminfo);
849 // 1. find which node has hole
850 // 2. change limit in that node.
851 // 3. change base and limit in later node
852 // 4. clear that node f0
854 // if there is not mem hole enabled, we need to change it's base instead
/* Hole size in KiB; base/mask registers use 512KiB units (>>9). */
856 hole_sizek = (4*1024*1024) - hole_startk;
858 for(ii=NODE_NUMS-1;ii>i;ii--) {
860 d = get_dram_base_mask(ii);
862 if(!(d.mask & 1)) continue;
864 d.base -= (hole_sizek>>9);
865 d.mask -= (hole_sizek>>9);
866 set_dram_base_mask(ii, d, sysconf.nodes);
868 if(get_DctSelHiEn(ii) & 1) {
869 sel_m = get_DctSelBaseAddr(ii);
870 sel_m -= hole_startk>>10;
871 set_DctSelBaseAddr(ii, sel_m);
875 d = get_dram_base_mask(i);
877 hoist = pci_read_config32(dev, 0xf0);
878 sel_hi_en = get_DctSelHiEn(i);
881 sel_m = get_DctSelBaseAddr(i);
/* Clear the hoist register on the hole node, then shrink its range. */
885 pci_write_config32(dev, 0xf0, 0);
886 d.mask -= (hole_sizek>>9);
887 set_dram_base_mask(i, d, sysconf.nodes);
888 if(one_DCT || (sel_m >= (hole_startk>>10))) {
890 sel_m -= hole_startk>>10;
891 set_DctSelBaseAddr(i, sel_m);
895 set_DctSelBaseOffset(i, 0);
899 d.base -= (hole_sizek>>9);
900 d.mask -= (hole_sizek>>9);
901 set_dram_base_mask(i, d, sysconf.nodes);
904 sel_m -= hole_startk>>10;
905 set_DctSelBaseAddr(i, sel_m);
/* Domain set_resources hook: place PCI memory windows, compute the MMIO
 * base (top of low memory), reconcile it with any hardware memory hole,
 * then register every node's DRAM range as RAM resources (splitting
 * around the VGA hole and the MMIO hole) and assign child resources.
 * Fix: the cast at original line 1044 used the nonexistent type
 * 'uin32_t' — a typo for 'u32', which broke compilation whenever
 * HW_MEM_HOLE_SIZE_AUTO_INC == 1. */
914 static void pci_domain_set_resources(device_t dev)
916 #if CONFIG_PCI_64BIT_PREF_MEM == 1
917 struct resource *io, *mem1, *mem2;
918 struct resource *resource, *last;
920 unsigned long mmio_basek;
924 #if HW_MEM_HOLE_SIZEK != 0
925 struct hw_mem_hole_info mem_hole;
926 u32 reset_memhole = 1;
929 #if CONFIG_PCI_64BIT_PREF_MEM == 1
931 for(link=0; link<dev->links; link++) {
932 /* Now reallocate the pci resources memory with the
933 * highest addresses I can manage.
935 mem1 = find_resource(dev, 1|(link<<2));
936 mem2 = find_resource(dev, 2|(link<<2));
938 printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
939 mem1->base, mem1->limit, mem1->size, mem1->align);
940 printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
941 mem2->base, mem2->limit, mem2->size, mem2->align);
943 /* See if both resources have roughly the same limits */
944 if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
945 ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
947 /* If so place the one with the most stringent alignment first
949 if (mem2->align > mem1->align) {
950 struct resource *tmp;
955 /* Now place the memory as high up as it will go */
956 mem2->base = resource_max(mem2);
957 mem1->limit = mem2->base - 1;
958 mem1->base = resource_max(mem1);
961 /* Place the resources as high up as they will go */
962 mem2->base = resource_max(mem2);
963 mem1->base = resource_max(mem1);
966 printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
967 mem1->base, mem1->limit, mem1->size, mem1->align);
968 printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
969 mem2->base, mem2->limit, mem2->size, mem2->align);
972 last = &dev->resource[dev->resources];
973 for(resource = &dev->resource[0]; resource < last; resource++)
975 resource->flags |= IORESOURCE_ASSIGNED;
976 resource->flags &= ~IORESOURCE_STORED;
977 link = (resource>>2) & 3;
978 compute_allocate_resource(&dev->link[link], resource,
979 BRIDGE_IO_MASK, resource->flags & BRIDGE_IO_MASK);
981 resource->flags |= IORESOURCE_STORED;
982 report_resource_stored(dev, resource, "");
/* Top of low PCI memory across all links, rounded down for MTRRs. */
987 pci_tolm = 0xffffffffUL;
988 for(link=0;link<dev->links; link++) {
989 pci_tolm = find_pci_tolm(&dev->link[link], pci_tolm);
992 #warning "FIXME handle interleaved nodes"
993 mmio_basek = pci_tolm >> 10;
994 /* Round mmio_basek to something the processor can support */
995 mmio_basek &= ~((1 << 6) -1);
997 #warning "FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M MMIO hole"
998 /* Round the mmio hold to 64M */
999 mmio_basek &= ~((64*1024) - 1);
1001 #if HW_MEM_HOLE_SIZEK != 0
1002 /* if the hw mem hole is already set in raminit stage, here we will compare
1003 * mmio_basek and hole_basek. if mmio_basek is bigger that hole_basek and will
1004 * use hole_basek as mmio_basek and we don't need to reset hole.
1005 * otherwise We reset the hole to the mmio_basek
1008 mem_hole = get_hw_mem_hole_info();
1010 // Use hole_basek as mmio_basek, and we don't need to reset hole anymore
1011 if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
1012 mmio_basek = mem_hole.hole_startk;
1016 #if CONFIG_AMDMCT == 0
1017 //mmio_basek = 3*1024*1024; // for debug to meet boundary
1020 if(mem_hole.node_id!=-1) {
1021 /* We need to select HW_MEM_HOLE_SIZEK for raminit, it can not
1022 make hole_startk to some basek too!
1023 We need to reset our Mem Hole, because We want more big HOLE
1025 Before that We need to disable mem hole at first, becase
1026 memhole could already be set on i+1 instead
1028 disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id);
1031 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
1032 // We need to double check if the mmio_basek is valid for hole
1033 // setting, if it is equal to basek, we need to decrease it some
1034 resource_t basek_pri;
1035 for (i = 0; i < sysconf.nodes; i++) {
1036 struct dram_base_mask_t d;
1038 d = get_dram_base_mask(i);
1040 if(!(d.mask &1)) continue;
1042 basek = ((resource_t)(d.base & 0x1fffff00)) << 9;
1043 if(mmio_basek == (u32)basek) {
/* BUGFIX: was '(uin32_t)' — nonexistent type, compile error. */
1044 mmio_basek -= (u32)(basek - basek_pri); // increase mem hole size to make sure it is on middle of pri node
/* Register each node's DRAM range, splitting around the legacy VGA
 * window (0xa0000-0xbffff) and the MMIO hole below 4G. */
1057 for(i = 0; i < sysconf.nodes; i++) {
1058 struct dram_base_mask_t d;
1059 resource_t basek, limitk, sizek; // 4 1T
1060 d = get_dram_base_mask(i);
1062 if(!(d.mask & 1)) continue;
1063 basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lost 6 bit here
1064 limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
1065 sizek = limitk - basek;
1067 /* see if we need a hole from 0xa0000 to 0xbffff */
1068 if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
1069 ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
1071 basek = (8*64)+(16*16);
1072 sizek = limitk - ((8*64)+(16*16));
1076 // printk_debug("node %d : mmio_basek=%08x, basek=%08x, limitk=%08x\n", i, mmio_basek, basek, limitk);
1078 /* split the region to accomodate pci memory space */
1079 if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
1080 if (basek <= mmio_basek) {
1082 pre_sizek = mmio_basek - basek;
1084 ram_resource(dev, (idx | i), basek, pre_sizek);
1088 #if CONFIG_AMDMCT == 0
1089 #if HW_MEM_HOLE_SIZEK != 0
1091 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
1092 struct mem_info *meminfo;
1093 meminfo = &sysinfox->meminfo[i];
1094 sizek += hoist_memory(mmio_basek,i, get_one_DCT(meminfo), sysconf.nodes);
1101 if ((basek + sizek) <= 4*1024*1024) {
1105 basek = 4*1024*1024;
1106 sizek -= (4*1024*1024 - mmio_basek);
1109 ram_resource(dev, (idx | i), basek, sizek);
1113 for(link = 0; link < dev->links; link++) {
1115 bus = &dev->link[link];
1116 if (bus->children) {
1117 assign_resources(bus);
/* Domain scan_bus hook: unmap every HT chain register (and extended
 * config maps), scan each link from the CDB device, then tune HT
 * transaction control (HTTC_RSP_PASS_PW) per node. */
1122 static u32 pci_domain_scan_bus(device_t dev, u32 max)
1126 /* Unmap all of the HT chains */
1127 for(reg = 0xe0; reg <= 0xec; reg += 4) {
1128 f1_write_config32(reg, 0);
1130 #if EXT_CONF_SUPPORT == 1
/* Clear all 64 extended config-map entries (selector 6) per node. */
1132 for(i = 0; i< sysconf.nodes; i++) {
1134 for(index = 0; index < 64; index++) {
1135 pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
1136 pci_write_config32(__f1_dev[i], 0x114, 0);
1143 for(i=0;i<dev->links;i++) {
1144 max = pci_scan_bus(&dev->link[i], PCI_DEVFN(CDB, 0), 0xff, max);
1147 /* Tune the hypertransport transaction for best performance.
1148 * Including enabling relaxed ordering if it is safe.
1151 for(i = 0; i < FX_DEVS; i++) {
1153 f0_dev = __f0_dev[i];
1154 if (f0_dev && f0_dev->enabled) {
1156 httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
1157 httc &= ~HTTC_RSP_PASS_PW;
1158 if (!dev->link[0].disable_relaxed_ordering) {
1159 httc |= HTTC_RSP_PASS_PW;
1161 printk_spew("%s passpw: %s\n",
1163 (!dev->link[0].disable_relaxed_ordering)?
1164 "enabled":"disabled");
1165 pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
/* Device operations for the PCI domain; config access method depends on
 * whether MMCONF is the default. */
1171 static struct device_operations pci_domain_ops = {
1172 .read_resources = pci_domain_read_resources,
1173 .set_resources = pci_domain_set_resources,
1174 .enable_resources = enable_childrens_resources,
1176 .scan_bus = pci_domain_scan_bus,
1177 #if MMCONF_SUPPORT_DEFAULT
1178 .ops_pci_bus = &pci_ops_mmconf,
1180 .ops_pci_bus = &pci_cf8_conf1,
/* Initialize the global sysconf from the first node's config registers:
 * southbridge link, node count, HT chain table, and APIC id / extended
 * APIC id bookkeeping for the BSP. */
1184 static void sysconf_init(device_t dev) // first node
1186 sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
1188 sysconf.ht_c_num = 0;
1190 unsigned ht_c_index;
1192 for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
1193 sysconf.ht_c_conf_bus[ht_c_index] = 0;
1196 sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1;
1197 #if CONFIG_MAX_PHYSICAL_CPUS > 8
/* Nodes beyond 8 are reported in the extended node count register. */
1198 sysconf.nodes += (((pci_read_config32(dev, 0x160)>>4) & 7)<<3);
1201 sysconf.enabled_apic_ext_id = 0;
1202 sysconf.lift_bsp_apicid = 0;
1204 /* Find the bootstrap processors apicid */
1205 sysconf.bsp_apicid = lapicid();
1206 sysconf.apicid_offset = sysconf.bsp_apicid;
1208 #if (ENABLE_APIC_EXT_ID == 1)
1209 if (pci_read_config32(dev, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
1211 sysconf.enabled_apic_ext_id = 1;
1213 #if (APIC_ID_OFFSET>0)
1214 if(sysconf.enabled_apic_ext_id) {
1215 if(sysconf.bsp_apicid == 0) {
1216 /* bsp apic id is not changed */
1217 sysconf.apicid_offset = APIC_ID_OFFSET;
1219 sysconf.lift_bsp_apicid = 1;
/* APIC cluster scan_bus hook: relocate the node-0 northbridge to bus CBB,
 * initialize sysconf, probe the PCI devices of every node, then create or
 * disable the APIC device for each core of each node.
 * NOTE(review): many intermediate lines are missing from this extract;
 * code left byte-identical. */
1228 static u32 cpu_bus_scan(device_t dev, u32 max)
1230 struct bus *cpu_bus;
1232 device_t pci_domain;
1238 int disable_siblings;
1239 unsigned ApicIdCoreIdSize;
/* Core count per node from CPUID 0x80000008 ECX[15:12]; fall back to
 * quad-core (3 siblings) when the field is zero. */
1242 ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
1243 if(ApicIdCoreIdSize) {
1244 siblings = (1<<ApicIdCoreIdSize)-1;
1246 siblings = 3; //quad core
1249 disable_siblings = !CONFIG_LOGICAL_CPUS;
1250 #if CONFIG_LOGICAL_CPUS == 1
1251 get_option(&disable_siblings, "quad_core");
1254 // for pre_e0, nb_cfg_54 can not be set, ( even set, when you read it
1256 // How can I get the nb_cfg_54 of every node' nb_cfg_54 in bsp???
1257 // and differ d0 and e0 single core
1259 nb_cfg_54 = read_nb_cfg_54();
/* Move the node-0 device from bus 0 to bus CBB so node ids map cleanly. */
1262 dev_mc = dev_find_slot(0, PCI_DEVFN(CDB, 0)); //0x00
1263 if(dev_mc && dev_mc->bus) {
1264 printk_debug("%s found", dev_path(dev_mc));
1265 pci_domain = dev_mc->bus->dev;
1266 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1267 printk_debug("\n%s move to ",dev_path(dev_mc));
1268 dev_mc->bus->secondary = CBB; // move to 0xff
1269 printk_debug("%s",dev_path(dev_mc));
1272 printk_debug(" but it is not under pci_domain directly ");
1277 dev_mc = dev_find_slot(CBB, PCI_DEVFN(CDB, 0));
/* Legacy fallback: device found at the fixed 0:18.0 location. */
1279 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
1280 if (dev_mc && dev_mc->bus) {
1281 printk_debug("%s found\n", dev_path(dev_mc));
1282 pci_domain = dev_mc->bus->dev;
1283 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1284 if((pci_domain->links==1) && (pci_domain->link[0].children == dev_mc)) {
1285 printk_debug("%s move to ",dev_path(dev_mc));
1286 dev_mc->bus->secondary = CBB; // move to 0xff
1287 printk_debug("%s\n",dev_path(dev_mc));
1289 printk_debug("%s move to ",dev_path(dev_mc));
1290 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
1291 printk_debug("%s\n",dev_path(dev_mc));
1292 dev_mc = dev_mc->sibling;
1301 dev_mc = dev_find_slot(CBB, PCI_DEVFN(CDB, 0));
1303 printk_err("%02x:%02x.0 not found", CBB, CDB);
1307 sysconf_init(dev_mc);
1309 nodes = sysconf.nodes;
1311 #if CBB && (NODE_NUMS > 32)
1312 if(nodes>32) { // need to put node 32 to node 63 to bus 0xfe
1313 if(pci_domain->links==1) {
1314 pci_domain->links++; // from 1 to 2
1315 pci_domain->link[1].link = 1;
1316 pci_domain->link[1].dev = pci_domain;
1317 pci_domain->link[1].children = 0;
1318 printk_debug("%s links increase to %d\n", dev_path(pci_domain), pci_domain->links);
1320 pci_domain->link[1].secondary = CBB - 1;
1323 /* Find which cpus are present */
1324 cpu_bus = &dev->link[0];
1325 for(i = 0; i < nodes; i++) {
1327 struct device_path cpu_path;
1328 unsigned busn, devn;
1334 #if CBB && (NODE_NUMS > 32)
1338 pbus = &(pci_domain->link[1]);
1342 /* Find the cpu's pci device */
1343 dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1345 /* If I am probing things in a weird order
1346 * ensure all of the cpu's pci devices are found.
1349 for(j = 0; j <= 5; j++) { //FBDIMM?
1350 dev = pci_probe_dev(NULL, pbus,
1351 PCI_DEVFN(devn, j));
1353 dev = dev_find_slot(busn, PCI_DEVFN(devn,0));
1356 /* Ok, We need to set the links for that device.
1357 * otherwise the device under it will not be scanned
1366 if(dev->links < linknum) {
1367 for(j=dev->links; j<linknum; j++) {
1368 dev->link[j].link = j;
1369 dev->link[j].dev = dev;
1371 dev->links = linknum;
1372 printk_debug("%s links increase to %d\n", dev_path(dev), dev->links);
/* Read actual core count from function 3 register 0xe8 bits [13:12]. */
1376 cores_found = 0; // one core
1377 dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1378 if (dev && dev->enabled) {
1379 j = pci_read_config32(dev, 0xe8);
1380 cores_found = (j >> 12) & 3; // dev is func 3
1381 printk_debug(" %s siblings=%d\n", dev_path(dev), cores_found);
1385 if(disable_siblings) {
1392 for (j = 0; j <=jj; j++ ) {
1394 /* Build the cpu device path */
1395 cpu_path.type = DEVICE_PATH_APIC;
1396 cpu_path.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:64); // ?
1398 /* See if I can find the cpu */
1399 cpu = find_dev_path(cpu_bus, &cpu_path);
1401 /* Enable the cpu if I have the processor */
1402 if (dev && dev->enabled) {
1404 cpu = alloc_dev(cpu_bus, &cpu_path);
1411 /* Disable the cpu if I don't have the processor */
1412 if (cpu && (!dev || !dev->enabled)) {
1416 /* Report what I have done */
1418 cpu->path.apic.node_id = i;
1419 cpu->path.apic.core_id = j;
1420 #if (ENABLE_APIC_EXT_ID == 1) && (APIC_ID_OFFSET>0)
1421 if(sysconf.enabled_apic_ext_id) {
1422 if(sysconf.lift_bsp_apicid) {
1423 cpu->path.apic.apic_id += sysconf.apicid_offset;
1426 if (cpu->path.apic.apic_id != 0)
1427 cpu->path.apic.apic_id += sysconf.apicid_offset;
1431 printk_debug("CPU: %s %s\n",
1432 dev_path(cpu), cpu->enabled?"enabled":"disabled");
/* init hook for the CPU cluster: bring up all CPUs on link 0. */
1441 static void cpu_bus_init(device_t dev)
1443 initialize_cpus(&dev->link[0]);
/* Intentionally empty: CPU cluster has no PCI-style resources. */
1447 static void cpu_bus_noop(device_t dev)
/* Device operations for the APIC cluster (CPU bus). */
1452 static struct device_operations cpu_bus_ops = {
1453 .read_resources = cpu_bus_noop,
1454 .set_resources = cpu_bus_noop,
1455 .enable_resources = cpu_bus_noop,
1456 .init = cpu_bus_init,
1457 .scan_bus = cpu_bus_scan,
/* Bind the correct operations to special bus devices: PCI domain gets
 * pci_domain_ops, the APIC cluster gets cpu_bus_ops. */
1461 static void root_complex_enable_dev(struct device *dev)
1463 /* Set the operations if it is a special bus type */
1464 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1465 dev->ops = &pci_domain_ops;
1467 else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1468 dev->ops = &cpu_bus_ops;
/* Chip operations for the fam10 root complex devicetree node. */
1472 struct chip_operations northbridge_amd_amdfam10_root_complex_ops = {
1473 CHIP_NAME("AMD FAM10 Root Complex")
1474 .enable_dev = root_complex_enable_dev,