2 * This file is part of the coreboot project.
4 * Copyright (C) 2007 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include <console/console.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <device/pci_ids.h>
26 #include <device/hypertransport.h>
32 #include <cpu/x86/lapic.h>
34 #if CONFIG_LOGICAL_CPUS==1
35 #include <cpu/amd/quadcore.h>
36 #include <pc80/mc146818rtc.h>
40 #include "root_complex/chip.h"
41 #include "northbridge.h"
45 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
46 #include <cpu/amd/model_10xxx_rev.h>
49 #include <cpu/amd/amdfam10_sysconf.h>
/* Global system configuration shared by the whole northbridge driver:
 * node count, southbridge link, HT chain bookkeeping, APIC id policy.
 * Filled in by sysconf_init() during cpu_bus_scan(). */
51 struct amdfam10_sysconf_t sysconf;
/* Per-node caches of the northbridge PCI devices: function 0 (HT config),
 * 1 (address maps), 2 (DRAM controller) and 4 (link control).
 * Populated by get_fx_devs(). */
53 #define FX_DEVS NODE_NUMS
54 static device_t __f0_dev[FX_DEVS];
55 static device_t __f1_dev[FX_DEVS];
56 static device_t __f2_dev[FX_DEVS];
57 static device_t __f4_dev[FX_DEVS];
/* Look up the PCI device for northbridge function 'fn' of HT node 'nodeid'.
 * Nodes below 32 are found on bus CONFIG_CBB at device CONFIG_CDB+nodeid;
 * higher node ids appear to be addressed on bus CONFIG_CBB-1.
 * NOTE(review): the conditional lines selecting between these returns are
 * not visible in this capture — confirm the branch structure upstream. */
59 device_t get_node_pci(u32 nodeid, u32 fn)
63 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
65 return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
69 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
/* Populate the __f0/__f1/__f2/__f4 per-node device caches for every
 * possible node.  Dies if node 0's function 1 device cannot be found,
 * since all address-map accesses depend on it. */
73 static void get_fx_devs(void)
79 for(i = 0; i < FX_DEVS; i++) {
80 __f0_dev[i] = get_node_pci(i, 0);
81 __f1_dev[i] = get_node_pci(i, 1);
82 __f2_dev[i] = get_node_pci(i, 2);
83 __f4_dev[i] = get_node_pci(i, 4);
86 printk_err("Cannot find %02x:%02x.1", CONFIG_CBB, CONFIG_CDB);
87 die("Cannot go on\n");
/* Read a function-1 (address map) config dword from node 0's copy of the
 * register. */
91 static u32 f1_read_config32(u32 reg)
94 return pci_read_config32(__f1_dev[0], reg);
/* Broadcast a function-1 config dword write to every enabled node, keeping
 * the address-map registers consistent across the fabric. */
97 static void f1_write_config32(u32 reg, u32 value)
101 for(i = 0; i < FX_DEVS; i++) {
104 if (dev && dev->enabled) {
105 pci_write_config32(dev, reg, value);
/* Derive the HT node id from a northbridge PCI device: its device number
 * minus CONFIG_CDB, plus 32 when the device sits on a bus other than
 * CONFIG_CBB (nodes 32-63 live on the secondary config bus).
 * NOTE(review): preprocessor guards between these returns are not visible
 * in this capture — verify against the full file. */
111 static u32 amdfam10_nodeid(device_t dev)
115 busn = dev->bus->secondary;
116 if(busn != CONFIG_CBB) {
117 return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
119 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
123 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
127 #include "amdfam10_conf.c"
/* Route legacy VGA accesses to the given node/link by programming the
 * function-1 VGA enable register (0xf4): bit 0 enables, bits [7:4] select
 * the node, bits [14:12] the link. */
129 static void set_vga_enable_reg(u32 nodeid, u32 linkn)
133 val = 1 | (nodeid<<4) | (linkn<<12);
134 /* it will routing (1)mmio 0xa0000:0xbffff (2) io 0x3b0:0x3bb,
136 f1_write_config32(0xf4, val);
/*
 * Scan one HyperTransport chain hanging off node 'nodeid', link 'link'.
 * Steps visible below: probe the link status/capability registers until the
 * link is connected and initialized, bail out for coherent links, allocate a
 * function-1 configuration map register (ht_c_index), pick a secondary/
 * subordinate bus range for the bridge, program the bridge bus numbers,
 * enumerate the chain with hypertransport_scan_chain(), then shrink the
 * subordinate bus number to the real maximum and record the unit-id bases
 * in sysconf.hcdn_reg.  Returns the updated highest bus number 'max'.
 *
 * NOTE(review): the original line numbering in this capture is
 * non-contiguous — several statements (loops, braces, else branches) are
 * missing between the visible lines; verify any change against upstream.
 */
140 static u32 amdfam10_scan_chain(device_t dev, u32 nodeid, u32 link, u32 sblink,
141 u32 max, u32 offset_unitid)
143 // I want to put sb chain in bus 0 can I?
149 u32 ht_unitid_base[4]; // here assume only 4 HT device on chain
152 u32 is_sublink1 = (link>3);
159 #if CONFIG_HT3_SUPPORT==1
163 regpos = 0x170 + 4 * (link&3); // it is only on sublink0
164 reg = pci_read_config32(dev, regpos);
165 if(reg & 1) return max; // already ganged no sblink1
166 devx = get_node_pci(nodeid, 4);
/* Each of the four sublinks has a 0x20-byte HT capability block at 0x80. */
172 dev->link[link].cap = 0x80 + ((link&3) *0x20);
/* Poll link status (cap + 0x18) until training settles. */
174 link_type = pci_read_config32(devx, dev->link[link].cap + 0x18);
175 } while(link_type & ConnectionPending);
176 if (!(link_type & LinkConnected)) {
180 link_type = pci_read_config32(devx, dev->link[link].cap + 0x18);
181 } while(!(link_type & InitComplete));
/* Only non-coherent links carry PCI-style device chains. */
182 if (!(link_type & NonCoherent)) {
185 /* See if there is an available configuration space mapping
186 * register in function 1.
188 ht_c_index = get_ht_c_index(nodeid, link, &sysconf);
190 #if CONFIG_EXT_CONF_SUPPORT == 0
191 if(ht_c_index>=4) return max;
194 /* Set up the primary, secondary and subordinate bus numbers.
195 * We have no idea how many busses are behind this bridge yet,
196 * so we set the subordinate bus number to 0xff for the moment.
199 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
200 // first chain will on bus 0
201 if((nodeid == 0) && (sblink==link)) { // actually max is 0 here
204 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
205 // second chain will be on 0x40, third 0x80, forth 0xc0
206 // i would refined that to 2, 3, 4 ==> 0, 0x, 40, 0x80, 0xc0
207 // >4 will use more segments, We can have 16 segmment and every segment have 256 bus, For that case need the kernel support mmio pci config.
209 min_bus = ((busn>>3) + 1) << 3; // one node can have 8 link and segn is the same
211 max = min_bus | (segn<<8);
221 max_bus = 0xfc | (segn<<8);
223 dev->link[link].secondary = min_bus;
224 dev->link[link].subordinate = max_bus;
225 /* Read the existing primary/secondary/subordinate bus
226 * number configuration.
228 busses = pci_read_config32(devx, dev->link[link].cap + 0x14);
230 /* Configure the bus numbers for this bridge: the configuration
231 * transactions will not be propagates by the bridge if it is
232 * not correctly configured
234 busses &= 0xffff00ff;
235 busses |= ((u32)(dev->link[link].secondary) << 8);
236 pci_write_config32(devx, dev->link[link].cap + 0x14, busses);
239 /* set the config map space */
241 set_config_map_reg(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, sysconf.segbit, sysconf.nodes);
243 /* Now we can scan all of the subordinate busses i.e. the
244 * chain on the hypertranport link
247 ht_unitid_base[i] = 0x20;
250 //if ext conf is enabled, only need use 0x1f
252 max_devfn = (0x17<<3) | 7;
254 max_devfn = (0x1f<<3) | 7;
256 max = hypertransport_scan_chain(&dev->link[link], 0, max_devfn, max, ht_unitid_base, offset_unitid);
259 /* We know the number of busses behind this bridge. Set the
260 * subordinate bus number to it's real value
262 if(ht_c_index>3) { // clear the extend reg
263 clear_config_map_reg(nodeid, link, ht_c_index, (max+1)>>sysconf.segbit, (dev->link[link].subordinate)>>sysconf.segbit, sysconf.nodes);
266 dev->link[link].subordinate = max;
267 set_config_map_reg(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, sysconf.segbit, sysconf.nodes);
271 // config config_reg, and ht_unitid_base to update hcdn_reg;
/* Pack the per-device unit-id bases into one dword, 8 bits each. */
274 temp |= (ht_unitid_base[i] & 0xff) << (i*8);
277 sysconf.hcdn_reg[ht_c_index] = temp;
281 store_ht_c_conf_bus(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, &sysconf);
/*
 * scan_bus hook for the northbridge device: scan every HT chain of this
 * node.  The southbridge chain (node 0, link == sysconf.sblk) is scanned
 * first so it lands on bus 0, then the remaining links are scanned in
 * order, skipping the already-done southbridge link.
 * Returns the updated highest bus number.
 */
287 static u32 amdfam10_scan_chains(device_t dev, u32 max)
291 u32 sblink = sysconf.sblk;
292 u32 offset_unitid = 0;
294 nodeid = amdfam10_nodeid(dev);
297 // Put sb chain in bus 0
298 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
300 #if ((CONFIG_HT_CHAIN_UNITID_BASE != 1) || (CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20))
303 max = amdfam10_scan_chain(dev, nodeid, sblink, sblink, max, offset_unitid ); // do sb ht chain at first, in case s2885 put sb chain (8131/8111) on link2, but put 8151 on link0
308 #if CONFIG_PCI_BUS_SEGN_BITS
309 max = check_segn(dev, max, sysconf.nodes, &sysconf);
313 for(link = 0; link < dev->links; link++) {
314 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
315 if( (nodeid == 0) && (sblink == link) ) continue; //already done
318 #if ((CONFIG_HT_CHAIN_UNITID_BASE != 1) || (CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20))
319 #if CONFIG_SB_HT_CHAIN_UNITID_OFFSET_ONLY == 1
320 if((nodeid == 0) && (sblink == link))
325 max = amdfam10_scan_chain(dev, nodeid, link, sblink, max, offset_unitid);
/*
 * Check whether function-1 mapping register pair 'reg' is usable for
 * goal_dev/goal_nodeid/goal_link.  Walks every node and link looking for a
 * resource already claiming this register (index 0x1000+reg).  Callers
 * treat the result as: already allocated to the goal (reusable), or > 1
 * meaning the pair is free — exact encoding of the return value is not
 * fully visible in this capture.
 */
331 static int reg_useable(u32 reg,device_t goal_dev, u32 goal_nodeid,
334 struct resource *res;
338 for(nodeid = 0; !res && (nodeid < NODE_NUMS); nodeid++) {
340 dev = __f0_dev[nodeid];
343 for(link = 0; !res && (link < 8); link++) {
344 res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
/* Loop counters were post-incremented before exit, hence the -1 here. */
350 if ( (goal_link == (link - 1)) &&
351 (goal_nodeid == (nodeid - 1)) &&
/*
 * Find (or allocate) a function-1 I/O base/limit register pair for
 * nodeid/link and return a new resource keyed by IOINDEX(0x1000+reg, link).
 * I/O pairs occupy registers 0xc0-0xd8 in steps of 8; with extended config
 * space support an extended index register (0x110-based encoding) is used
 * instead, so the pool can never be exhausted.
 */
359 static struct resource *amdfam10_find_iopair(device_t dev, u32 nodeid, u32 link)
361 struct resource *resource;
365 for(reg = 0xc0; reg <= 0xd8; reg += 0x8) {
367 result = reg_useable(reg, dev, nodeid, link);
369 /* I have been allocated this one */
372 else if (result > 1) {
373 /* I have a free register pair */
378 reg = free_reg; // if no free, the free_reg still be 0
383 //because of Extend conf space, we will never run out of reg, but we need one index to differ them. so same node and same link can have multi range
384 u32 index = get_io_addr_index(nodeid, link);
385 reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255
388 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/*
 * Find (or allocate) a function-1 MMIO base/limit register pair for
 * nodeid/link and return a new resource keyed by IOINDEX(0x1000+reg, link).
 * MMIO pairs occupy registers 0x80-0xb8 in steps of 8; the extended-config
 * fallback mirrors amdfam10_find_iopair() but uses the (6<<20) MMIO
 * encoding.
 */
393 static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
395 struct resource *resource;
399 for(reg = 0x80; reg <= 0xb8; reg += 0x8) {
401 result = reg_useable(reg, dev, nodeid, link);
403 /* I have been allocated this one */
406 else if (result > 1) {
407 /* I have a free register pair */
417 //because of Extend conf space, we will never run out of reg,
418 // but we need one index to differ them. so same node and
419 // same link can have multi range
420 u32 index = get_mmio_addr_index(nodeid, link);
421 reg = 0x110+ (index<<24) + (6<<20); // index could be 0, 63
424 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/*
 * Create the three per-link window resources for one HT link: an I/O
 * window (16-bit limit), a prefetchable memory window and a regular memory
 * window (both limited to the 40-bit DRAM address space).  Extended-config
 * ("ext", index low bits 0x1110) resources get normalized after setup.
 */
429 static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
431 struct resource *resource;
433 /* Initialize the io space constraints on the current bus */
434 resource = amdfam10_find_iopair(dev, nodeid, link);
437 #if CONFIG_EXT_CONF_SUPPORT == 1
438 if((resource->index & 0x1fff) == 0x1110) { // ext
443 align = log2(HT_IO_HOST_ALIGN);
446 resource->align = align;
447 resource->gran = align;
448 resource->limit = 0xffffUL;
449 resource->flags = IORESOURCE_IO;
452 /* Initialize the prefetchable memory constraints on the current bus */
453 resource = amdfam10_find_mempair(dev, nodeid, link);
457 resource->align = log2(HT_MEM_HOST_ALIGN);
458 resource->gran = log2(HT_MEM_HOST_ALIGN);
459 resource->limit = 0xffffffffffULL;
460 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
462 #if CONFIG_EXT_CONF_SUPPORT == 1
463 if((resource->index & 0x1fff) == 0x1110) { // ext
464 normalize_resource(resource);
470 /* Initialize the memory constraints on the current bus */
471 resource = amdfam10_find_mempair(dev, nodeid, link);
475 resource->align = log2(HT_MEM_HOST_ALIGN);
476 resource->gran = log2(HT_MEM_HOST_ALIGN);
477 resource->limit = 0xffffffffffULL;
478 resource->flags = IORESOURCE_MEM;
480 #if CONFIG_EXT_CONF_SUPPORT == 1
481 if((resource->index & 0x1fff) == 0x1110) { // ext
482 normalize_resource(resource);
/* read_resources hook: create window resources for every link of this
 * node that actually has children attached. */
490 static void amdfam10_read_resources(device_t dev)
494 nodeid = amdfam10_nodeid(dev);
495 for(link = 0; link < dev->links; link++) {
496 if (dev->link[link].children) {
497 amdfam10_link_read_bases(dev, nodeid, link);
/*
 * Program one assigned function-1 window resource into the hardware.
 * Skips resources that are unassigned, already stored, not MEM/IO, or not
 * function-1 windows (index < 0x1000).  The register number and link are
 * recovered from the resource index; base/limit are written in 256-byte
 * granularity (>>8) per the F1 address-map register layout.
 */
503 static void amdfam10_set_resource(device_t dev, struct resource *resource,
506 resource_t rbase, rend;
510 /* Make certain the resource has actually been set */
511 if (!(resource->flags & IORESOURCE_ASSIGNED)) {
515 /* If I have already stored this resource don't worry about it */
516 if (resource->flags & IORESOURCE_STORED) {
520 /* Only handle PCI memory and IO resources */
521 if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
524 /* Ensure I am actually looking at a resource of function 1 */
525 if ((resource->index & 0xffff) < 0x1000) {
528 /* Get the base address */
529 rbase = resource->base;
531 /* Get the limit (rounded up) */
532 rend = resource_end(resource);
534 /* Get the register and link */
535 reg = resource->index & 0xfff; // 4k
536 link = IOINDEX_LINK(resource->index);
538 if (resource->flags & IORESOURCE_IO) {
540 set_io_addr_reg(dev, nodeid, link, reg, rbase>>8, rend>>8);
541 store_conf_io_addr(nodeid, link, reg, (resource->index >> 24), rbase>>8, rend>>8);
543 else if (resource->flags & IORESOURCE_MEM) {
544 set_mmio_addr_reg(nodeid, link, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
545 store_conf_mmio_addr(nodeid, link, reg, (resource->index >>24), rbase>>8, rend>>8);
547 resource->flags |= IORESOURCE_STORED;
548 sprintf(buf, " <node %02x link %02x>",
550 report_resource_stored(dev, resource, buf);
555 * I tried to reuse the resource allocation code in amdfam10_set_resource()
556 * but it is too diffcult to deal with the resource allocation magic.
558 #if CONFIG_CONSOLE_VGA_MULTI == 1
559 extern device_t vga_pri; // the primary vga device, defined in device.c
/*
 * Find the first link of this node that has a VGA device behind it (and,
 * with multi-VGA support, specifically the link whose bus range contains
 * the primary VGA device), then route legacy VGA space to that link via
 * set_vga_enable_reg().  Does nothing when no link carries VGA.
 */
562 static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
566 /* find out which link the VGA card is connected,
567 * we only deal with the 'first' vga card */
568 for (link = 0; link < dev->links; link++) {
569 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
570 #if CONFIG_CONSOLE_VGA_MULTI == 1
571 printk_debug("VGA: vga_pri bus num = %d dev->link[link] bus range [%d,%d]\n", vga_pri->bus->secondary,
572 dev->link[link].secondary,dev->link[link].subordinate);
573 /* We need to make sure the vga_pri is under the link */
574 if((vga_pri->bus->secondary >= dev->link[link].secondary ) &&
575 (vga_pri->bus->secondary <= dev->link[link].subordinate )
582 /* no VGA card installed */
583 if (link == dev->links)
586 printk_debug("VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link);
587 set_vga_enable_reg(nodeid, link);
/* set_resources hook: route VGA, program every window resource of this
 * node into the F1 registers, then recurse into each link's bus. */
590 static void amdfam10_set_resources(device_t dev)
595 /* Find the nodeid */
596 nodeid = amdfam10_nodeid(dev);
598 amdfam10_create_vga_resource(dev, nodeid);
600 /* Set each resource we have found */
601 for(i = 0; i < dev->resources; i++) {
602 amdfam10_set_resource(dev, &dev->resource[i], nodeid);
605 for(link = 0; link < dev->links; link++) {
607 bus = &dev->link[link];
609 assign_resources(bus);
/* enable_resources hook: enable this device's own resources, then those
 * of all children. */
615 static void amdfam10_enable_resources(device_t dev)
617 pci_dev_enable_resources(dev);
618 enable_childrens_resources(dev);
/* init hook for the HT-config (function 0) device; body not visible in
 * this capture. */
621 static void mcf0_control_init(struct device *dev)
/* Device operations for the per-node northbridge function 0 device. */
625 static struct device_operations northbridge_operations = {
626 .read_resources = amdfam10_read_resources,
627 .set_resources = amdfam10_set_resources,
628 .enable_resources = amdfam10_enable_resources,
629 .init = mcf0_control_init,
630 .scan_bus = amdfam10_scan_chains,
/* Bind the operations above to the AMD northbridge PCI id. */
636 static struct pci_driver mcf0_driver __pci_driver = {
637 .ops = &northbridge_operations,
638 .vendor = PCI_VENDOR_ID_AMD,
/* Chip operations exported for the devicetree 'northbridge/amd/amdfam10'
 * chip entry. */
642 struct chip_operations northbridge_amd_amdfam10_ops = {
643 CHIP_NAME("AMD FAM10 Northbridge")
/*
 * read_resources hook for the PCI domain.  First reserves every F1
 * base/limit register pair (0x80-0xd8) that firmware/raminit already
 * allocated, so the allocator will not reuse them; then creates the
 * system-wide I/O and memory constraint resources (subtractive when
 * 64-bit prefetch support is off, otherwise per-link windows).
 */
647 static void amdfam10_domain_read_resources(device_t dev)
649 struct resource *resource;
653 /* Find the already assigned resource pairs */
655 for(reg = 0x80; reg <= 0xd8; reg+= 0x08) {
657 base = f1_read_config32(reg);
658 limit = f1_read_config32(reg + 0x04);
659 /* Is this register allocated? */
660 if ((base & 3) != 0) {
661 unsigned nodeid, link;
/* Destination node/link encoding differs between MMIO (< 0xc0) and
 * I/O register pairs. */
663 if(reg<0xc0) { // mmio
664 nodeid = (limit & 0xf) + (base&0x30);
666 nodeid = (limit & 0xf) + ((base>>4)&0x30);
668 link = (limit >> 4) & 7;
669 reg_dev = __f0_dev[nodeid];
671 /* Reserve the resource */
672 struct resource *reg_resource;
673 reg_resource = new_resource(reg_dev, IOINDEX(0x1000 + reg, link));
675 reg_resource->flags = 1;
680 /* FIXME: do we need to check extend conf space?
681 I don't believe that much preset value */
683 #if CONFIG_PCI_64BIT_PREF_MEM == 0
684 /* Initialize the system wide io space constraints */
685 resource = new_resource(dev, IOINDEX_SUBTRACTIVE(0, 0));
686 resource->base = 0x400;
687 resource->limit = 0xffffUL;
688 resource->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
690 /* Initialize the system wide memory resources constraints */
691 resource = new_resource(dev, IOINDEX_SUBTRACTIVE(1, 0));
692 resource->limit = 0xfcffffffffULL;
693 resource->flags = IORESOURCE_MEM | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
695 for(link=0; link<dev->links; link++) {
696 /* Initialize the system wide io space constraints */
697 resource = new_resource(dev, 0|(link<<2));
698 resource->base = 0x400;
699 resource->limit = 0xffffUL;
700 resource->flags = IORESOURCE_IO;
702 /* Initialize the system wide prefetchable memory resources constraints */
703 resource = new_resource(dev, 1|(link<<2));
704 resource->limit = 0xfcffffffffULL;
705 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
707 /* Initialize the system wide memory resources constraints */
708 resource = new_resource(dev, 2|(link<<2));
709 resource->limit = 0xfcffffffffULL;
710 resource->flags = IORESOURCE_MEM;
/* Register a fixed, cacheable RAM region with the allocator.  basek/sizek
 * are in KiB (shifted by 10 to bytes); marked STORED/ASSIGNED since RAM
 * is not relocatable. */
715 static void ram_resource(device_t dev, unsigned long index,
716 resource_t basek, resource_t sizek)
718 struct resource *resource;
723 resource = new_resource(dev, index);
724 resource->base = basek << 10;
725 resource->size = sizek << 10;
726 resource->flags = IORESOURCE_MEM | IORESOURCE_CACHEABLE | \
727 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
/* search_bus_resources() callback: track the memory resource with the
 * lowest base seen so far (gp points at the best-so-far pointer). */
730 static void tolm_test(void *gp, struct device *dev, struct resource *new)
732 struct resource **best_p = gp;
733 struct resource *best;
735 if (!best || (best->base > new->base)) {
/* Compute the top of low PCI memory: scan the bus for the lowest-based
 * memory resource and clamp 'tolm' down to it. */
741 static u32 find_pci_tolm(struct bus *bus, u32 tolm)
743 struct resource *min;
745 search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
746 if (min && tolm > min->base) {
752 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* Result of the hardware memory-hole probe: start of the hole in KiB and
 * the node that carries it (-1 when none was found). */
754 struct hw_mem_hole_info {
755 unsigned hole_startk;
/*
 * Detect a DRAM memory hole set up by raminit.  First look for a node
 * whose F1 DRAM Hole Address Register (0xf0) has the enable bit set;
 * failing that, look for a discontinuity between one node's DRAM limit
 * and the next node's base below 4G, which is the same hole expressed
 * through the base/limit registers.
 */
759 static struct hw_mem_hole_info get_hw_mem_hole_info(void)
761 struct hw_mem_hole_info mem_hole;
764 mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
765 mem_hole.node_id = -1;
767 for (i = 0; i < sysconf.nodes; i++) {
768 struct dram_base_mask_t d;
770 d = get_dram_base_mask(i);
771 if(!(d.mask & 1)) continue; // no memory on this node
773 hole = pci_read_config32(__f1_dev[i], 0xf0);
774 if(hole & 1) { // we find the hole
775 mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
776 mem_hole.node_id = i; // record the node No with hole
777 break; // only one hole
781 //We need to double check if there is speical set on base reg and limit reg are not continous instead of hole, it will find out it's hole_startk
782 if(mem_hole.node_id==-1) {
783 resource_t limitk_pri = 0;
784 for(i=0; i<sysconf.nodes; i++) {
785 struct dram_base_mask_t d;
786 resource_t base_k, limit_k;
787 d = get_dram_base_mask(i);
788 if(!(d.base & 1)) continue;
790 base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
791 if(base_k > 4 *1024 * 1024) break; // don't need to go to check
792 if(limitk_pri != base_k) { // we find the hole
793 mem_hole.hole_startk = (unsigned)limitk_pri; // must beblow 4G
794 mem_hole.node_id = i;
795 break; //only one hole
798 limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
799 limitk_pri = limit_k;
806 #if CONFIG_AMDMCT == 0
/*
 * Undo the raminit memory hoisting on node 'i' whose hole starts at
 * hole_startk (KiB): shift the DRAM base/mask of every later node down by
 * the hole size, clear node i's hole register (F1 0xf0), shrink its limit
 * and adjust the DCT select base/offset registers accordingly.
 * NOTE(review): several branch/brace lines are missing from this capture —
 * verify any modification against the upstream file.
 */
807 static void disable_hoist_memory(unsigned long hole_startk, int i)
811 struct dram_base_mask_t d;
818 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
819 struct mem_info *meminfo;
820 meminfo = &sysinfox->meminfo[i];
822 one_DCT = get_one_DCT(meminfo);
824 // 1. find which node has hole
825 // 2. change limit in that node.
826 // 3. change base and limit in later node
827 // 4. clear that node f0
829 // if there is not mem hole enabled, we need to change it's base instead
/* Hole reaches up to 4G; size in KiB, converted to 512KB units via >>9. */
831 hole_sizek = (4*1024*1024) - hole_startk;
833 for(ii=NODE_NUMS-1;ii>i;ii--) {
835 d = get_dram_base_mask(ii);
837 if(!(d.mask & 1)) continue;
839 d.base -= (hole_sizek>>9);
840 d.mask -= (hole_sizek>>9);
841 set_dram_base_mask(ii, d, sysconf.nodes);
843 if(get_DctSelHiEn(ii) & 1) {
844 sel_m = get_DctSelBaseAddr(ii);
845 sel_m -= hole_startk>>10;
846 set_DctSelBaseAddr(ii, sel_m);
850 d = get_dram_base_mask(i);
852 hoist = pci_read_config32(dev, 0xf0);
853 sel_hi_en = get_DctSelHiEn(i);
856 sel_m = get_DctSelBaseAddr(i);
860 pci_write_config32(dev, 0xf0, 0);
861 d.mask -= (hole_sizek>>9);
862 set_dram_base_mask(i, d, sysconf.nodes);
863 if(one_DCT || (sel_m >= (hole_startk>>10))) {
865 sel_m -= hole_startk>>10;
866 set_DctSelBaseAddr(i, sel_m);
870 set_DctSelBaseOffset(i, 0);
874 d.base -= (hole_sizek>>9);
875 d.mask -= (hole_sizek>>9);
876 set_dram_base_mask(i, d, sysconf.nodes);
879 sel_m -= hole_startk>>10;
880 set_DctSelBaseAddr(i, sel_m);
889 #if CONFIG_HAVE_HIGH_TABLES==1
890 #define HIGH_TABLES_SIZE 64 // maximum size of high tables in KB
891 extern uint64_t high_tables_base, high_tables_size;
/*
 * set_resources hook for the PCI domain.  Responsibilities visible below:
 *  - (64-bit pref mem config) re-place the two memory windows per link as
 *    high as possible, most-constrained first;
 *  - compute pci_tolm (top of low PCI memory) across all links and round
 *    it down to a 64M boundary to derive mmio_basek;
 *  - reconcile mmio_basek with a raminit-created hardware memory hole,
 *    disabling/re-creating the hole when needed;
 *  - walk each node's DRAM base/limit pair and register the RAM regions
 *    via ram_resource(), carving out the legacy VGA hole (0xa0000-0xbffff)
 *    and the PCI MMIO hole, and reserving space for high tables;
 *  - finally assign resources on each child bus.
 */
894 static void pci_domain_set_resources(device_t dev)
896 #if CONFIG_PCI_64BIT_PREF_MEM == 1
897 struct resource *io, *mem1, *mem2;
898 struct resource *resource, *last;
900 unsigned long mmio_basek;
904 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
905 struct hw_mem_hole_info mem_hole;
906 u32 reset_memhole = 1;
909 #if CONFIG_PCI_64BIT_PREF_MEM == 1
911 for(link=0; link<dev->links; link++) {
912 /* Now reallocate the pci resources memory with the
913 * highest addresses I can manage.
915 mem1 = find_resource(dev, 1|(link<<2));
916 mem2 = find_resource(dev, 2|(link<<2));
918 printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
919 mem1->base, mem1->limit, mem1->size, mem1->align);
920 printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
921 mem2->base, mem2->limit, mem2->size, mem2->align);
923 /* See if both resources have roughly the same limits */
924 if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
925 ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
927 /* If so place the one with the most stringent alignment first
929 if (mem2->align > mem1->align) {
930 struct resource *tmp;
935 /* Now place the memory as high up as it will go */
936 mem2->base = resource_max(mem2);
937 mem1->limit = mem2->base - 1;
938 mem1->base = resource_max(mem1);
941 /* Place the resources as high up as they will go */
942 mem2->base = resource_max(mem2);
943 mem1->base = resource_max(mem1);
946 printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
947 mem1->base, mem1->limit, mem1->size, mem1->align);
948 printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
949 mem2->base, mem2->limit, mem2->size, mem2->align);
952 last = &dev->resource[dev->resources];
953 for(resource = &dev->resource[0]; resource < last; resource++)
955 resource->flags |= IORESOURCE_ASSIGNED;
956 resource->flags &= ~IORESOURCE_STORED;
/* NOTE(review): '(resource>>2)' shifts a pointer, which is not valid C —
 * this likely should derive the link from resource->index; confirm
 * against the upstream file. */
957 link = (resource>>2) & 3;
958 resource->flags |= IORESOURCE_STORED;
959 report_resource_stored(dev, resource, "");
964 pci_tolm = 0xffffffffUL;
965 for(link=0;link<dev->links; link++) {
966 pci_tolm = find_pci_tolm(&dev->link[link], pci_tolm);
969 #warning "FIXME handle interleaved nodes"
970 mmio_basek = pci_tolm >> 10;
971 /* Round mmio_basek to something the processor can support */
972 mmio_basek &= ~((1 << 6) -1);
974 #warning "FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M MMIO hole"
975 /* Round the mmio hold to 64M */
976 mmio_basek &= ~((64*1024) - 1);
978 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
979 /* if the hw mem hole is already set in raminit stage, here we will compare
980 * mmio_basek and hole_basek. if mmio_basek is bigger that hole_basek and will
981 * use hole_basek as mmio_basek and we don't need to reset hole.
982 * otherwise We reset the hole to the mmio_basek
985 mem_hole = get_hw_mem_hole_info();
987 // Use hole_basek as mmio_basek, and we don't need to reset hole anymore
988 if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
989 mmio_basek = mem_hole.hole_startk;
993 #if CONFIG_AMDMCT == 0
994 //mmio_basek = 3*1024*1024; // for debug to meet boundary
997 if(mem_hole.node_id!=-1) {
998 /* We need to select CONFIG_HW_MEM_HOLE_SIZEK for raminit, it can not
999 make hole_startk to some basek too!
1000 We need to reset our Mem Hole, because We want more big HOLE
1002 Before that We need to disable mem hole at first, becase
1003 memhole could already be set on i+1 instead
1005 disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id);
1008 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
1009 // We need to double check if the mmio_basek is valid for hole
1010 // setting, if it is equal to basek, we need to decrease it some
1011 resource_t basek_pri;
1012 for (i = 0; i < sysconf.nodes; i++) {
1013 struct dram_base_mask_t d;
1015 d = get_dram_base_mask(i);
1017 if(!(d.mask &1)) continue;
1019 basek = ((resource_t)(d.base & 0x1fffff00)) << 9;
1020 if(mmio_basek == (u32)basek) {
/* NOTE(review): 'uin32_t' below looks like a typo for 'u32'/'uint32_t' —
 * confirm against the upstream file. */
1021 mmio_basek -= (uin32_t)(basek - basek_pri); // increase mem hole size to make sure it is on middle of pri node
/* Register each node's DRAM as RAM resources, splitting around the VGA
 * hole and the MMIO hole. */
1034 for(i = 0; i < sysconf.nodes; i++) {
1035 struct dram_base_mask_t d;
1036 resource_t basek, limitk, sizek; // 4 1T
1037 d = get_dram_base_mask(i);
1039 if(!(d.mask & 1)) continue;
1040 basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lost 6 bit here
1041 limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
1042 sizek = limitk - basek;
1044 /* see if we need a hole from 0xa0000 to 0xbffff */
1045 if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
1046 ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
1048 basek = (8*64)+(16*16);
1049 sizek = limitk - ((8*64)+(16*16));
1053 // printk_debug("node %d : mmio_basek=%08x, basek=%08x, limitk=%08x\n", i, mmio_basek, basek, limitk);
1055 /* split the region to accomodate pci memory space */
1056 if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
1057 if (basek <= mmio_basek) {
1059 pre_sizek = mmio_basek - basek;
1061 ram_resource(dev, (idx | i), basek, pre_sizek);
1064 #if CONFIG_HAVE_HIGH_TABLES==1
1065 if (i==0 && high_tables_base==0) {
1066 /* Leave some space for ACPI, PIRQ and MP tables */
1067 high_tables_base = (mmio_basek - HIGH_TABLES_SIZE) * 1024;
1068 high_tables_size = HIGH_TABLES_SIZE * 1024;
1069 printk_debug("(split)%xK table at =%08llx\n", HIGH_TABLES_SIZE,
1074 #if CONFIG_AMDMCT == 0
1075 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
1077 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
1078 struct mem_info *meminfo;
1079 meminfo = &sysinfox->meminfo[i];
1080 sizek += hoist_memory(mmio_basek,i, get_one_DCT(meminfo), sysconf.nodes);
1087 if ((basek + sizek) <= 4*1024*1024) {
1091 basek = 4*1024*1024;
1092 sizek -= (4*1024*1024 - mmio_basek);
1095 ram_resource(dev, (idx | i), basek, sizek);
1097 #if CONFIG_HAVE_HIGH_TABLES==1
1098 printk_debug("%d: mmio_basek=%08lx, basek=%08x, limitk=%08x\n",
1099 i, mmio_basek, basek, limitk);
1100 if (i==0 && high_tables_base==0) {
1101 /* Leave some space for ACPI, PIRQ and MP tables */
1102 high_tables_base = (limitk - HIGH_TABLES_SIZE) * 1024;
1103 high_tables_size = HIGH_TABLES_SIZE * 1024;
1108 for(link = 0; link < dev->links; link++) {
1110 bus = &dev->link[link];
1111 if (bus->children) {
1112 assign_resources(bus);
/*
 * scan_bus hook for the PCI domain.  Unmaps all HT chain config-map
 * registers (F1 0xe0-0xec) and, with extended config support, clears the
 * 64 extended MMIO index/data registers on every node, then scans each
 * domain link starting at device CONFIG_CDB.  Afterwards tunes HT
 * transaction control on every node: response PassPW (relaxed ordering)
 * is enabled only when the link does not disable it.
 */
1117 static u32 amdfam10_domain_scan_bus(device_t dev, u32 max)
1121 /* Unmap all of the HT chains */
1122 for(reg = 0xe0; reg <= 0xec; reg += 4) {
1123 f1_write_config32(reg, 0);
1125 #if CONFIG_EXT_CONF_SUPPORT == 1
1127 for(i = 0; i< sysconf.nodes; i++) {
1129 for(index = 0; index < 64; index++) {
1130 pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
1131 pci_write_config32(__f1_dev[i], 0x114, 0);
1138 for(i=0;i<dev->links;i++) {
1139 max = pci_scan_bus(&dev->link[i], PCI_DEVFN(CONFIG_CDB, 0), 0xff, max);
1142 /* Tune the hypertransport transaction for best performance.
1143 * Including enabling relaxed ordering if it is safe.
1146 for(i = 0; i < FX_DEVS; i++) {
1148 f0_dev = __f0_dev[i];
1149 if (f0_dev && f0_dev->enabled) {
1151 httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
1152 httc &= ~HTTC_RSP_PASS_PW;
1153 if (!dev->link[0].disable_relaxed_ordering) {
1154 httc |= HTTC_RSP_PASS_PW;
1156 printk_spew("%s passpw: %s\n",
1158 (!dev->link[0].disable_relaxed_ordering)?
1159 "enabled":"disabled");
1160 pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
/* Device operations for the PCI domain; the config access method is
 * chosen at build time (MMCONF vs. CF8/CFC). */
1166 static struct device_operations pci_domain_ops = {
1167 .read_resources = amdfam10_domain_read_resources,
1168 .set_resources = pci_domain_set_resources,
1169 .enable_resources = enable_childrens_resources,
1171 .scan_bus = amdfam10_domain_scan_bus,
1172 #if CONFIG_MMCONF_SUPPORT_DEFAULT
1173 .ops_pci_bus = &pci_ops_mmconf,
1175 .ops_pci_bus = &pci_cf8_conf1,
/*
 * Initialize the global sysconf from node 0's northbridge registers:
 * southbridge link (reg 0x64), node count (reg 0x60, extended via 0x160
 * for >8 sockets), HT chain bookkeeping, and the APIC extended-id /
 * apicid-offset policy derived from HT transaction control (reg 0x68)
 * and the bootstrap processor's LAPIC id.
 */
1179 static void sysconf_init(device_t dev) // first node
1181 sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
1183 sysconf.ht_c_num = 0;
1185 unsigned ht_c_index;
1187 for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
1188 sysconf.ht_c_conf_bus[ht_c_index] = 0;
1191 sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1;
1192 #if CONFIG_MAX_PHYSICAL_CPUS > 8
1193 sysconf.nodes += (((pci_read_config32(dev, 0x160)>>4) & 7)<<3);
1196 sysconf.enabled_apic_ext_id = 0;
1197 sysconf.lift_bsp_apicid = 0;
1199 /* Find the bootstrap processors apicid */
1200 sysconf.bsp_apicid = lapicid();
1201 sysconf.apicid_offset = sysconf.bsp_apicid;
1203 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
1204 if (pci_read_config32(dev, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
1206 sysconf.enabled_apic_ext_id = 1;
1208 #if (CONFIG_APIC_ID_OFFSET>0)
1209 if(sysconf.enabled_apic_ext_id) {
1210 if(sysconf.bsp_apicid == 0) {
1211 /* bsp apic id is not changed */
1212 sysconf.apicid_offset = CONFIG_APIC_ID_OFFSET;
1214 sysconf.lift_bsp_apicid = 1;
/*
 * scan_bus hook for the APIC cluster: enumerate CPU (APIC) devices.
 * Visible steps: determine sibling count from CPUID 0x80000008's
 * ApicIdCoreIdSize; optionally disable multi-core via the "quad_core"
 * CMOS option; relocate the memory-controller PCI device to bus
 * CONFIG_CBB; call sysconf_init() on it; extend the domain with a second
 * link when more than 32 nodes exist; then, per node, probe northbridge
 * functions 0-5, read the core count from function 3 reg 0xe8, and
 * allocate/enable/disable one APIC device per core with the apic id
 * derived from node, core, NB_CFG bit 54 and the configured apicid offset.
 * NOTE(review): this capture is missing interleaved statements (the
 * numbering jumps); verify details against the upstream file.
 */
1223 static u32 cpu_bus_scan(device_t dev, u32 max)
1225 struct bus *cpu_bus;
1227 device_t pci_domain;
1233 int disable_siblings;
1234 unsigned ApicIdCoreIdSize;
1237 ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
1238 if(ApicIdCoreIdSize) {
1239 siblings = (1<<ApicIdCoreIdSize)-1;
1241 siblings = 3; //quad core
1244 disable_siblings = !CONFIG_LOGICAL_CPUS;
1245 #if CONFIG_LOGICAL_CPUS == 1
1246 get_option(&disable_siblings, "quad_core");
1249 // for pre_e0, nb_cfg_54 can not be set, ( even set, when you read it
1251 // How can I get the nb_cfg_54 of every node' nb_cfg_54 in bsp???
1252 // and differ d0 and e0 single core
1254 nb_cfg_54 = read_nb_cfg_54();
1257 dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
1258 if(dev_mc && dev_mc->bus) {
1259 printk_debug("%s found", dev_path(dev_mc));
1260 pci_domain = dev_mc->bus->dev;
1261 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1262 printk_debug("\n%s move to ",dev_path(dev_mc));
1263 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1264 printk_debug("%s",dev_path(dev_mc));
1267 printk_debug(" but it is not under pci_domain directly ");
1272 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1274 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
1275 if (dev_mc && dev_mc->bus) {
1276 printk_debug("%s found\n", dev_path(dev_mc));
1277 pci_domain = dev_mc->bus->dev;
1278 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1279 if((pci_domain->links==1) && (pci_domain->link[0].children == dev_mc)) {
1280 printk_debug("%s move to ",dev_path(dev_mc));
1281 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1282 printk_debug("%s\n",dev_path(dev_mc));
1284 printk_debug("%s move to ",dev_path(dev_mc));
1285 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
1286 printk_debug("%s\n",dev_path(dev_mc));
1287 dev_mc = dev_mc->sibling;
1296 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1298 printk_err("%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
1302 sysconf_init(dev_mc);
1304 nodes = sysconf.nodes;
1306 #if CONFIG_CBB && (NODE_NUMS > 32)
1307 if(nodes>32) { // need to put node 32 to node 63 to bus 0xfe
1308 if(pci_domain->links==1) {
1309 pci_domain->links++; // from 1 to 2
1310 pci_domain->link[1].link = 1;
1311 pci_domain->link[1].dev = pci_domain;
1312 pci_domain->link[1].children = 0;
1313 printk_debug("%s links increase to %d\n", dev_path(pci_domain), pci_domain->links);
1315 pci_domain->link[1].secondary = CONFIG_CBB - 1;
1318 /* Find which cpus are present */
1319 cpu_bus = &dev->link[0];
1320 for(i = 0; i < nodes; i++) {
1322 struct device_path cpu_path;
1323 unsigned busn, devn;
1327 devn = CONFIG_CDB+i;
1329 #if CONFIG_CBB && (NODE_NUMS > 32)
1333 pbus = &(pci_domain->link[1]);
1337 /* Find the cpu's pci device */
1338 dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1340 /* If I am probing things in a weird order
1341 * ensure all of the cpu's pci devices are found.
1344 for(j = 0; j <= 5; j++) { //FBDIMM?
1345 dev = pci_probe_dev(NULL, pbus,
1346 PCI_DEVFN(devn, j));
1348 dev = dev_find_slot(busn, PCI_DEVFN(devn,0));
1351 /* Ok, We need to set the links for that device.
1352 * otherwise the device under it will not be scanned
1356 #if CONFIG_HT3_SUPPORT==1
1361 if(dev->links < linknum) {
1362 for(j=dev->links; j<linknum; j++) {
1363 dev->link[j].link = j;
1364 dev->link[j].dev = dev;
1366 dev->links = linknum;
1367 printk_debug("%s links increase to %d\n", dev_path(dev), dev->links);
1371 cores_found = 0; // one core
1372 dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1373 if (dev && dev->enabled) {
1374 j = pci_read_config32(dev, 0xe8);
1375 cores_found = (j >> 12) & 3; // dev is func 3
1376 printk_debug(" %s siblings=%d\n", dev_path(dev), cores_found);
1380 if(disable_siblings) {
1387 for (j = 0; j <=jj; j++ ) {
1389 /* Build the cpu device path */
1390 cpu_path.type = DEVICE_PATH_APIC;
1391 cpu_path.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:64); // ?
1393 /* See if I can find the cpu */
1394 cpu = find_dev_path(cpu_bus, &cpu_path);
1396 /* Enable the cpu if I have the processor */
1397 if (dev && dev->enabled) {
1399 cpu = alloc_dev(cpu_bus, &cpu_path);
1406 /* Disable the cpu if I don't have the processor */
1407 if (cpu && (!dev || !dev->enabled)) {
1411 /* Report what I have done */
1413 cpu->path.apic.node_id = i;
1414 cpu->path.apic.core_id = j;
1415 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET>0)
1416 if(sysconf.enabled_apic_ext_id) {
1417 if(sysconf.lift_bsp_apicid) {
1418 cpu->path.apic.apic_id += sysconf.apicid_offset;
1421 if (cpu->path.apic.apic_id != 0)
1422 cpu->path.apic.apic_id += sysconf.apicid_offset;
1426 printk_debug("CPU: %s %s\n",
1427 dev_path(cpu), cpu->enabled?"enabled":"disabled");
/* init hook for the APIC cluster: bring up all CPUs on link 0. */
1436 static void cpu_bus_init(device_t dev)
1438 initialize_cpus(&dev->link[0]);
/* Intentionally empty hook — the CPU bus has no PCI-style resources. */
1442 static void cpu_bus_noop(device_t dev)
/* Device operations for the APIC cluster (CPU bus). */
1447 static struct device_operations cpu_bus_ops = {
1448 .read_resources = cpu_bus_noop,
1449 .set_resources = cpu_bus_noop,
1450 .enable_resources = cpu_bus_noop,
1451 .init = cpu_bus_init,
1452 .scan_bus = cpu_bus_scan,
/* enable_dev hook for the root complex chip: attach the proper operations
 * to the PCI domain and APIC cluster pseudo-devices. */
1456 static void root_complex_enable_dev(struct device *dev)
1458 /* Set the operations if it is a special bus type */
1459 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1460 dev->ops = &pci_domain_ops;
1462 else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1463 dev->ops = &cpu_bus_ops;
/* Chip operations exported for the devicetree root-complex chip entry. */
1467 struct chip_operations northbridge_amd_amdfam10_root_complex_ops = {
1468 CHIP_NAME("AMD FAM10 Root Complex")
1469 .enable_dev = root_complex_enable_dev,