2 * This file is part of the coreboot project.
4 * Copyright (C) 2007 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include <console/console.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <device/pci_ids.h>
26 #include <device/hypertransport.h>
32 #include <cpu/x86/lapic.h>
34 #if CONFIG_LOGICAL_CPUS==1
35 #include <cpu/amd/multicore.h>
36 #include <pc80/mc146818rtc.h>
40 #include "root_complex/chip.h"
41 #include "northbridge.h"
45 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
46 #include <cpu/amd/model_10xxx_rev.h>
49 #include <cpu/amd/amdfam10_sysconf.h>
50 #if CONFIG_AMD_SB_CIMX
/* NOTE(review): this file is an incomplete/garbled extraction of coreboot's
 * northbridge/amd/amdfam10/northbridge.c -- each line carries a residual
 * original line number and many interior lines are missing.  Comments only
 * are added; code is left byte-identical. */
/* System-wide northbridge configuration shared by this file. */
54 struct amdfam10_sysconf_t sysconf;
/* Cached PCI devices for config functions 0, 1, 2 and 4 of each HT node
 * (device CONFIG_CDB + node).  Populated once by get_fx_devs(). */
56 #define FX_DEVS NODE_NUMS
57 static device_t __f0_dev[FX_DEVS];
58 static device_t __f1_dev[FX_DEVS];
59 static device_t __f2_dev[FX_DEVS];
60 static device_t __f4_dev[FX_DEVS];
/* Presumably the count of nodes whose f0/f1 devices were found -- the
 * increment is not visible in this extraction; TODO confirm. */
61 static unsigned fx_devs=0;
/* Return the PCI device for config function 'fn' of HT node 'nodeid'.
 * The two distinct return paths suggest nodes >= 32 are addressed on bus
 * CONFIG_CBB-1 with the devfn rebased by -32; the guarding #if/if lines
 * are missing from this extraction -- TODO confirm against upstream. */
63 device_t get_node_pci(u32 nodeid, u32 fn)
67 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
/* High-node path (nodeid >= 32): previous bus, devfn rebased. */
69 return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
73 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
/* Fill the __f0/__f1/__f2/__f4 device caches for every possible node and
 * die if node 0's function 0/1 devices cannot be found (they are required
 * by f1_read_config32()/f1_write_config32() below). */
77 static void get_fx_devs(void)
80 for(i = 0; i < FX_DEVS; i++) {
81 __f0_dev[i] = get_node_pci(i, 0);
82 __f1_dev[i] = get_node_pci(i, 1);
83 __f2_dev[i] = get_node_pci(i, 2);
84 __f4_dev[i] = get_node_pci(i, 4);
/* Body of this if (presumably fx_devs = i+1) is missing here. */
85 if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
88 if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
89 die("Cannot find 0:0x18.[0|1]\n");
/* Read a 32-bit register from node 0's function 1 config space
 * (the address-map function).  Presumably calls get_fx_devs() first
 * when fx_devs == 0 -- that guard is not visible in this extraction. */
93 static u32 f1_read_config32(unsigned reg)
97 return pci_read_config32(__f1_dev[0], reg);
/* Broadcast-write a 32-bit value to the given function-1 register on every
 * discovered node; disabled devices are skipped. */
100 static void f1_write_config32(unsigned reg, u32 value)
105 for(i = 0; i < fx_devs; i++) {
/* 'dev' presumably = __f1_dev[i]; the assignment line is missing here. */
108 if (dev && dev->enabled) {
109 pci_write_config32(dev, reg, value);
/* Derive the HT node id of a northbridge PCI device from its bus/devfn.
 * Devices not on bus CONFIG_CBB are high nodes and get +32; the exact
 * branch structure is partly missing from this extraction. */
114 static u32 amdfam10_nodeid(device_t dev)
118 busn = dev->bus->secondary;
119 if(busn != CONFIG_CBB) {
120 return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
122 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
126 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
/* Route legacy VGA ranges to the given node/link by programming the
 * VGA Enable register (function 1, offset 0xf4) on all nodes. */
132 static void set_vga_enable_reg(u32 nodeid, u32 linkn)
136 val = 1 | (nodeid<<4) | (linkn<<12);
137 /* it will routing (1)mmio 0xa0000:0xbffff (2) io 0x3b0:0x3bb,
139 f1_write_config32(0xf4, val);
/* Scan one HyperTransport chain hanging off node 'nodeid', link 'link_num'.
 * Assigns bus numbers to the chain, programs the node's config-map
 * registers, scans the devices on the chain, then records the final bus
 * range and unit-id bases in sysconf.  Returns the updated highest bus
 * number 'max'.  Many interior lines (declarations, do/while openers,
 * else branches) are missing from this extraction. */
143 static u32 amdfam10_scan_chain(device_t dev, u32 nodeid, struct bus *link, u32 link_num, u32 sblink,
144 u32 max, u32 offset_unitid)
146 // I want to put sb chain in bus 0 can I?
152 u32 ht_unitid_base[4]; // here assume only 4 HT device on chain
155 u32 is_sublink1 = (link_num>3);
159 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
164 #if CONFIG_HT3_SUPPORT==1
/* Sublink handling: if the link is already ganged there is no sublink1. */
168 regpos = 0x170 + 4 * (link_num&3); // it is only on sublink0
169 reg = pci_read_config32(dev, regpos);
170 if(reg & 1) return max; // already ganged no sblink1
171 devx = get_node_pci(nodeid, 4);
/* Poll the link control/status block until the link is connected and
 * initialized; bail out for unconnected or coherent links. */
177 link->cap = 0x80 + ((link_num&3) *0x20);
179 link_type = pci_read_config32(devx, link->cap + 0x18);
180 } while(link_type & ConnectionPending);
181 if (!(link_type & LinkConnected)) {
185 link_type = pci_read_config32(devx, link->cap + 0x18);
186 } while(!(link_type & InitComplete));
187 if (!(link_type & NonCoherent)) {
190 /* See if there is an available configuration space mapping
191 * register in function 1.
193 ht_c_index = get_ht_c_index(nodeid, link_num, &sysconf);
195 #if CONFIG_EXT_CONF_SUPPORT == 0
196 if(ht_c_index>=4) return max;
199 /* Set up the primary, secondary and subordinate bus numbers.
200 * We have no idea how many busses are behind this bridge yet,
201 * so we set the subordinate bus number to 0xff for the moment.
203 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
204 // first chain will on bus 0
205 if((nodeid == 0) && (sblink==link_num)) { // actually max is 0 here
208 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
209 // second chain will be on 0x40, third 0x80, forth 0xc0
210 // i would refined that to 2, 3, 4 ==> 0, 0x, 40, 0x80, 0xc0
211 // >4 will use more segments, We can have 16 segmment and every segment have 256 bus, For that case need the kernel support mmio pci config.
213 min_bus = ((busn>>3) + 1) << 3; // one node can have 8 link and segn is the same
215 max = min_bus | (segn<<8);
225 max_bus = 0xfc | (segn<<8);
227 link->secondary = min_bus;
228 link->subordinate = max_bus;
230 /* Read the existing primary/secondary/subordinate bus
231 * number configuration.
233 busses = pci_read_config32(devx, link->cap + 0x14);
235 /* Configure the bus numbers for this bridge: the configuration
236 * transactions will not be propagates by the bridge if it is
237 * not correctly configured
239 busses &= 0xffff00ff;
240 busses |= ((u32)(link->secondary) << 8);
241 pci_write_config32(devx, link->cap + 0x14, busses);
244 /* set the config map space */
246 set_config_map_reg(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, sysconf.segbit, sysconf.nodes);
248 /* Now we can scan all of the subordinate busses i.e. the
249 * chain on the hypertranport link
252 ht_unitid_base[i] = 0x20;
255 //if ext conf is enabled, only need use 0x1f
257 max_devfn = (0x17<<3) | 7;
259 max_devfn = (0x1f<<3) | 7;
261 max = hypertransport_scan_chain(link, 0, max_devfn, max, ht_unitid_base, offset_unitid);
263 /* We know the number of busses behind this bridge. Set the
264 * subordinate bus number to it's real value
266 if(ht_c_index>3) { // clear the extend reg
267 clear_config_map_reg(nodeid, link_num, ht_c_index, (max+1)>>sysconf.segbit, (link->subordinate)>>sysconf.segbit, sysconf.nodes);
270 link->subordinate = max;
271 set_config_map_reg(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, sysconf.segbit, sysconf.nodes);
275 // use ht_unitid_base to update hcdn_reg
278 temp |= (ht_unitid_base[i] & 0xff) << (i*8);
281 sysconf.hcdn_reg[ht_c_index] = temp;
284 store_ht_c_conf_bus(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, &sysconf);
/* Scan all HT chains of one node: the southbridge chain first (so it can
 * claim bus 0), then every remaining link.  Returns the updated highest
 * bus number. */
288 static unsigned amdfam10_scan_chains(device_t dev, unsigned max)
292 unsigned sblink = sysconf.sblk;
293 unsigned offset_unitid = 0;
295 nodeid = amdfam10_nodeid(dev);
297 // Put sb chain in bus 0
298 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
300 #if ((CONFIG_HT_CHAIN_UNITID_BASE != 1) || (CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20))
303 for (link = dev->link_list; link; link = link->next)
304 if (link->link_num == sblink)
305 max = amdfam10_scan_chain(dev, nodeid, link, sblink, sblink, max, offset_unitid ); // do sb ht chain at first, in case s2885 put sb chain (8131/8111) on link2, but put 8151 on link0
309 #if CONFIG_PCI_BUS_SEGN_BITS
310 max = check_segn(dev, max, sysconf.nodes, &sysconf);
/* Second pass: all links, skipping the SB chain already scanned above. */
313 for(link = dev->link_list; link; link = link->next) {
314 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
315 if( (nodeid == 0) && (sblink == link->link_num) ) continue; //already done
318 #if ((CONFIG_HT_CHAIN_UNITID_BASE != 1) || (CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20))
319 #if CONFIG_SB_HT_CHAIN_UNITID_OFFSET_ONLY == 1
320 if((nodeid == 0) && (sblink == link->link_num))
325 max = amdfam10_scan_chain(dev, nodeid, link, link->link_num, sblink, max, offset_unitid);
/* Check whether function-1 address-pair register 'reg' is usable for
 * goal_dev/goal_nodeid/goal_link.  Searches every node and link for an
 * existing resource with that register index; the return-value encoding
 * (allocated-to-us vs free) is established by the callers below, which
 * treat 1 as "already mine" and >1 as "free". */
331 static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
334 struct resource *res;
335 unsigned nodeid, link = 0;
338 for(nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
340 dev = __f0_dev[nodeid];
343 for(link = 0; !res && (link < 8); link++) {
344 res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
/* Note: link/nodeid were post-incremented by the loops, hence the -1. */
350 if ( (goal_link == (link - 1)) &&
351 (goal_nodeid == (nodeid - 1)) &&
/* Find (or allocate) an I/O base/limit register pair for nodeid/link and
 * return a new resource keyed by IOINDEX(0x1000 + reg, link).  Registers
 * 0xc0..0xd8 are the classic pairs; with extended config space an
 * extended index register (0x110-based) is used instead. */
359 static struct resource *amdfam10_find_iopair(device_t dev, unsigned nodeid, unsigned link)
361 struct resource *resource;
365 for(reg = 0xc0; reg <= 0xd8; reg += 0x8) {
367 result = reg_useable(reg, dev, nodeid, link);
369 /* I have been allocated this one */
372 else if (result > 1) {
373 /* I have a free register pair */
378 reg = free_reg; // if no free, the free_reg still be 0
383 //because of Extend conf space, we will never run out of reg, but we need one index to differ them. so same node and same link can have multi range
384 u32 index = get_io_addr_index(nodeid, link);
385 reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255
388 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/* Find (or allocate) an MMIO base/limit register pair for nodeid/link,
 * mirroring amdfam10_find_iopair() but over registers 0x80..0xb8 and
 * with the extended-config index encoded with (6<<20). */
393 static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
395 struct resource *resource;
399 for(reg = 0x80; reg <= 0xb8; reg += 0x8) {
401 result = reg_useable(reg, dev, nodeid, link);
403 /* I have been allocated this one */
406 else if (result > 1) {
407 /* I have a free register pair */
417 //because of Extend conf space, we will never run out of reg,
418 // but we need one index to differ them. so same node and
419 // same link can have multi range
420 u32 index = get_mmio_addr_index(nodeid, link);
421 reg = 0x110+ (index<<24) + (6<<20); // index could be 0, 63
424 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/* Create the three bridge resources (I/O, prefetchable memory, memory)
 * for one node/link pair and initialize their alignment, granularity,
 * limits and flags.  Extended-config resources (index 0x1110) get
 * normalized separately. */
429 static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
431 struct resource *resource;
433 /* Initialize the io space constraints on the current bus */
434 resource = amdfam10_find_iopair(dev, nodeid, link);
437 #if CONFIG_EXT_CONF_SUPPORT == 1
438 if((resource->index & 0x1fff) == 0x1110) { // ext
443 align = log2(HT_IO_HOST_ALIGN);
446 resource->align = align;
447 resource->gran = align;
448 resource->limit = 0xffffUL;
449 resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
452 /* Initialize the prefetchable memory constraints on the current bus */
453 resource = amdfam10_find_mempair(dev, nodeid, link);
457 resource->align = log2(HT_MEM_HOST_ALIGN);
458 resource->gran = log2(HT_MEM_HOST_ALIGN);
/* 40-bit physical address limit for Fam10 MMIO mapping. */
459 resource->limit = 0xffffffffffULL;
460 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
461 resource->flags |= IORESOURCE_BRIDGE;
463 #if CONFIG_EXT_CONF_SUPPORT == 1
464 if((resource->index & 0x1fff) == 0x1110) { // ext
465 normalize_resource(resource);
471 /* Initialize the memory constraints on the current bus */
472 resource = amdfam10_find_mempair(dev, nodeid, link);
476 resource->align = log2(HT_MEM_HOST_ALIGN);
477 resource->gran = log2(HT_MEM_HOST_ALIGN);
478 resource->limit = 0xffffffffffULL;
479 resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
480 #if CONFIG_EXT_CONF_SUPPORT == 1
481 if((resource->index & 0x1fff) == 0x1110) { // ext
482 normalize_resource(resource);
/* read_resources hook: create bridge resources for every link of this
 * node that actually has children. */
488 static void amdfam10_read_resources(device_t dev)
492 nodeid = amdfam10_nodeid(dev);
493 for(link = dev->link_list; link; link = link->next) {
494 if (link->children) {
495 amdfam10_link_read_bases(dev, nodeid, link->link_num);
/* Program one previously created function-1 resource into hardware:
 * decode the register and link from the resource index, write the
 * I/O or MMIO base/limit pair, and mark the resource as stored. */
500 static void amdfam10_set_resource(device_t dev, struct resource *resource,
503 resource_t rbase, rend;
504 unsigned reg, link_num;
507 /* Make certain the resource has actually been set */
508 if (!(resource->flags & IORESOURCE_ASSIGNED)) {
512 /* If I have already stored this resource don't worry about it */
513 if (resource->flags & IORESOURCE_STORED) {
517 /* Only handle PCI memory and IO resources */
518 if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
521 /* Ensure I am actually looking at a resource of function 1 */
522 if ((resource->index & 0xffff) < 0x1000) {
525 /* Get the base address */
526 rbase = resource->base;
528 /* Get the limit (rounded up) */
529 rend = resource_end(resource);
531 /* Get the register and link */
532 reg = resource->index & 0xfff; // 4k
533 link_num = IOINDEX_LINK(resource->index);
535 if (resource->flags & IORESOURCE_IO) {
537 set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
538 store_conf_io_addr(nodeid, link_num, reg, (resource->index >> 24), rbase>>8, rend>>8);
540 else if (resource->flags & IORESOURCE_MEM) {
541 set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
542 store_conf_mmio_addr(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8);
544 resource->flags |= IORESOURCE_STORED;
545 sprintf(buf, " <node %x link %x>",
547 report_resource_stored(dev, resource, buf);
551 * I tried to reuse the resource allocation code in amdfam10_set_resource()
552 * but it is too difficult to deal with the resource allocation magic.
/* Find which link of this node has the (first) VGA card behind it and
 * route legacy VGA decode to that node/link.  With multiple adapters the
 * primary VGA device's bus range decides which link wins. */
555 static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
559 /* find out which link the VGA card is connected,
560 * we only deal with the 'first' vga card */
561 for (link = dev->link_list; link; link = link->next) {
562 if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
563 #if CONFIG_MULTIPLE_VGA_ADAPTERS == 1
564 extern device_t vga_pri; // the primary vga device, defined in device.c
565 printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
566 link->secondary,link->subordinate);
567 /* We need to make sure the vga_pri is under the link */
568 if((vga_pri->bus->secondary >= link->secondary ) &&
569 (vga_pri->bus->secondary <= link->subordinate )
576 /* no VGA card installed */
580 printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link->link_num);
581 set_vga_enable_reg(nodeid, link->link_num);
/* set_resources hook: program the VGA routing, store every resource of
 * this node into hardware, then recurse into the child buses. */
584 static void amdfam10_set_resources(device_t dev)
588 struct resource *res;
590 /* Find the nodeid */
591 nodeid = amdfam10_nodeid(dev);
593 amdfam10_create_vga_resource(dev, nodeid);
595 /* Set each resource we have found */
596 for(res = dev->resource_list; res; res = res->next) {
597 amdfam10_set_resource(dev, res, nodeid);
600 for(bus = dev->link_list; bus; bus = bus->next) {
602 assign_resources(bus);
/* Init hook for the node's miscellaneous-control function 0 device.
 * The body is not visible in this extraction (presumably empty/trivial). */
607 static void mcf0_control_init(struct device *dev)
/* device_operations for each node's HT configuration device (0:18.0). */
611 static struct device_operations northbridge_operations = {
612 .read_resources = amdfam10_read_resources,
613 .set_resources = amdfam10_set_resources,
614 .enable_resources = pci_dev_enable_resources,
615 .init = mcf0_control_init,
616 .scan_bus = amdfam10_scan_chains,
/* PCI driver binding for the AMD Fam10 northbridge function 0 device.
 * The .device id line is missing from this extraction. */
622 static const struct pci_driver mcf0_driver __pci_driver = {
623 .ops = &northbridge_operations,
624 .vendor = PCI_VENDOR_ID_AMD,
/* Chip operations published for the devicetree. */
628 struct chip_operations northbridge_amd_amdfam10_ops = {
629 CHIP_NAME("AMD FAM10 Northbridge")
/* read_resources hook for the PCI domain: reserve the base/limit register
 * pairs that romstage already programmed, then create the system-wide
 * I/O and memory constraint resources (plus the fixed MMCONF window). */
633 static void amdfam10_domain_read_resources(device_t dev)
637 /* Find the already assigned resource pairs */
639 for(reg = 0x80; reg <= 0xd8; reg+= 0x08) {
641 base = f1_read_config32(reg);
642 limit = f1_read_config32(reg + 0x04);
643 /* Is this register allocated? */
644 if ((base & 3) != 0) {
645 unsigned nodeid, reg_link;
/* Destination node is encoded differently for MMIO vs I/O pairs. */
647 if(reg<0xc0) { // mmio
648 nodeid = (limit & 0xf) + (base&0x30);
650 nodeid = (limit & 0xf) + ((base>>4)&0x30);
652 reg_link = (limit >> 4) & 7;
653 reg_dev = __f0_dev[nodeid];
655 /* Reserve the resource */
656 struct resource *res;
657 res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
664 /* FIXME: do we need to check extend conf space?
665 I don't believe that much preset value */
667 #if CONFIG_PCI_64BIT_PREF_MEM == 0
668 pci_domain_read_resources(dev);
671 struct resource *resource;
672 for(link=dev->link_list; link; link = link->next) {
673 /* Initialize the system wide io space constraints */
674 resource = new_resource(dev, 0|(link->link_num<<2));
675 resource->base = 0x400;
676 resource->limit = 0xffffUL;
677 resource->flags = IORESOURCE_IO;
679 /* Initialize the system wide prefetchable memory resources constraints */
680 resource = new_resource(dev, 1|(link->link_num<<2));
681 resource->limit = 0xfcffffffffULL;
682 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
684 /* Initialize the system wide memory resources constraints */
685 resource = new_resource(dev, 2|(link->link_num<<2));
686 resource->limit = 0xfcffffffffULL;
687 resource->flags = IORESOURCE_MEM;
690 #if CONFIG_MMCONF_SUPPORT
/* Report the MMCONF aperture as fixed/reserved so nothing else claims it. */
691 struct resource *res = new_resource(dev, 0xc0010058);
692 res->base = CONFIG_MMCONF_BASE_ADDRESS;
693 res->size = CONFIG_MMCONF_BUS_NUMBER * 4096*256;
694 res->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
695 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
/* Find the lowest memory resource base on 'bus' and lower 'tolm'
 * (top of low memory for PCI) to it if needed. */
699 static u32 my_find_pci_tolm(struct bus *bus, u32 tolm)
701 struct resource *min;
703 search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
704 if (min && tolm > min->base) {
710 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* Hardware memory-hole descriptor: start address in KB and the owning
 * node id (a node_id field of -1 means "no hole"; that field's
 * declaration line is missing from this extraction). */
712 struct hw_mem_hole_info {
713 unsigned hole_startk;
/* Discover the hardware DRAM hole below 4GB.  First look for a node whose
 * F1 0xf0 hoist register has the valid bit set; failing that, detect a
 * discontinuity between one node's DRAM limit and the next node's base. */
717 static struct hw_mem_hole_info get_hw_mem_hole_info(void)
719 struct hw_mem_hole_info mem_hole;
722 mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
723 mem_hole.node_id = -1;
725 for (i = 0; i < sysconf.nodes; i++) {
726 struct dram_base_mask_t d;
728 d = get_dram_base_mask(i);
729 if(!(d.mask & 1)) continue; // no memory on this node
731 hole = pci_read_config32(__f1_dev[i], 0xf0);
732 if(hole & 1) { // we find the hole
/* Hoist register bits [31:24] hold the hole base; >>10 converts to KB. */
733 mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
734 mem_hole.node_id = i; // record the node No with hole
735 break; // only one hole
739 //We need to double check if there is speical set on base reg and limit reg are not continous instead of hole, it will find out it's hole_startk
740 if(mem_hole.node_id==-1) {
741 resource_t limitk_pri = 0;
742 for(i=0; i<sysconf.nodes; i++) {
743 struct dram_base_mask_t d;
744 resource_t base_k, limit_k;
745 d = get_dram_base_mask(i);
746 if(!(d.base & 1)) continue;
748 base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
749 if(base_k > 4 *1024 * 1024) break; // don't need to go to check
750 if(limitk_pri != base_k) { // we find the hole
751 mem_hole.hole_startk = (unsigned)limitk_pri; // must beblow 4G
752 mem_hole.node_id = i;
753 break; //only one hole
756 limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
757 limitk_pri = limit_k;
763 // WHY this check? CONFIG_AMDMCT is enabled on all Fam10 boards.
764 // Does it make sense not to?
765 #if CONFIG_AMDMCT == 0
/* Undo DRAM hoisting around the memory hole: shift the base/mask of every
 * node above the hole down by the hole size, clear the hoist register on
 * the hole's node, and adjust the DCT select base where needed.  Many
 * interior lines are missing from this extraction. */
766 static void disable_hoist_memory(unsigned long hole_startk, int node_id)
770 struct dram_base_mask_t d;
777 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_RAMTOP) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
778 struct mem_info *meminfo;
779 meminfo = &sysinfox->meminfo[node_id];
781 one_DCT = get_one_DCT(meminfo);
783 // 1. find which node has hole
784 // 2. change limit in that node.
785 // 3. change base and limit in later node
786 // 4. clear that node f0
788 // if there is not mem hole enabled, we need to change it's base instead
790 hole_sizek = (4*1024*1024) - hole_startk;
/* Walk nodes above the hole from the top down, shifting them down. */
792 for(i=NODE_NUMS-1;i>node_id;i--) {
794 d = get_dram_base_mask(i);
796 if(!(d.mask & 1)) continue;
798 d.base -= (hole_sizek>>9);
799 d.mask -= (hole_sizek>>9);
800 set_dram_base_mask(i, d, sysconf.nodes);
802 if(get_DctSelHiEn(i) & 1) {
803 sel_m = get_DctSelBaseAddr(i);
804 sel_m -= hole_startk>>10;
805 set_DctSelBaseAddr(i, sel_m);
/* Now fix up the node that owns the hole itself. */
809 d = get_dram_base_mask(node_id);
810 dev = __f1_dev[node_id];
811 sel_hi_en = get_DctSelHiEn(node_id);
814 sel_m = get_DctSelBaseAddr(node_id);
816 hoist = pci_read_config32(dev, 0xf0);
818 pci_write_config32(dev, 0xf0, 0);
819 d.mask -= (hole_sizek>>9);
820 set_dram_base_mask(node_id, d, sysconf.nodes);
821 if(one_DCT || (sel_m >= (hole_startk>>10))) {
823 sel_m -= hole_startk>>10;
824 set_DctSelBaseAddr(node_id, sel_m);
828 set_DctSelBaseOffset(node_id, 0);
831 d.base -= (hole_sizek>>9);
832 d.mask -= (hole_sizek>>9);
833 set_dram_base_mask(node_id, d, sysconf.nodes);
836 sel_m -= hole_startk>>10;
837 set_DctSelBaseAddr(node_id, sel_m);
846 #if CONFIG_WRITE_HIGH_TABLES==1
850 #if CONFIG_GFXUMA == 1
/* UMA framebuffer window carved out by raminit; defined elsewhere. */
851 extern uint64_t uma_memory_base, uma_memory_size;
/* Report the UMA region as a fixed, already-stored reserved resource so
 * the allocator keeps it out of the PCI/RAM maps. */
853 static void add_uma_resource(struct device *dev, int index)
855 struct resource *resource;
857 printk(BIOS_DEBUG, "Adding UMA memory area\n");
858 resource = new_resource(dev, index);
859 resource->base = (resource_t) uma_memory_base;
860 resource->size = (resource_t) uma_memory_size;
861 resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
862 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
/* set_resources hook for the PCI domain.  Determines the top of low memory
 * available to PCI (pci_tolm), reconciles it with the hardware memory hole,
 * then walks every node's DRAM base/limit pair creating ram_resource()s,
 * splitting regions around the MMIO hole and reserving space for the high
 * tables and UMA.  Many interior lines are missing from this extraction;
 * only two visible compile-level defects are fixed below. */
866 static void amdfam10_domain_set_resources(device_t dev)
868 #if CONFIG_PCI_64BIT_PREF_MEM == 1
869 struct resource *io, *mem1, *mem2;
870 struct resource *res;
872 unsigned long mmio_basek;
876 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
877 struct hw_mem_hole_info mem_hole;
878 u32 reset_memhole = 1;
881 #if CONFIG_PCI_64BIT_PREF_MEM == 1
883 for(link = dev->link_list; link; link = link->next) {
884 /* Now reallocate the pci resources memory with the
885 * highest addresses I can manage.
887 mem1 = find_resource(dev, 1|(link->link_num<<2));
888 mem2 = find_resource(dev, 2|(link->link_num<<2));
890 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
891 mem1->base, mem1->limit, mem1->size, mem1->align);
892 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
893 mem2->base, mem2->limit, mem2->size, mem2->align);
895 /* See if both resources have roughly the same limits */
896 if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
897 ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
899 /* If so place the one with the most stringent alignment first
901 if (mem2->align > mem1->align) {
902 struct resource *tmp;
907 /* Now place the memory as high up as it will go */
908 mem2->base = resource_max(mem2);
909 mem1->limit = mem2->base - 1;
910 mem1->base = resource_max(mem1);
913 /* Place the resources as high up as they will go */
914 mem2->base = resource_max(mem2);
915 mem1->base = resource_max(mem1);
918 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
919 mem1->base, mem1->limit, mem1->size, mem1->align);
920 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
921 mem2->base, mem2->limit, mem2->size, mem2->align);
/* FIX(review): was '&dev->resource_list', which is struct resource** and
 * cannot be walked via res->next; 'res' is declared struct resource *. */
924 for(res = dev->resource_list; res; res = res->next)
926 res->flags |= IORESOURCE_ASSIGNED;
927 res->flags |= IORESOURCE_STORED;
928 report_resource_stored(dev, res, "");
932 pci_tolm = 0xffffffffUL;
933 for(link = dev->link_list; link; link = link->next) {
934 pci_tolm = my_find_pci_tolm(link, pci_tolm);
937 // FIXME handle interleaved nodes. If you fix this here, please fix
939 mmio_basek = pci_tolm >> 10;
940 /* Round mmio_basek to something the processor can support */
941 mmio_basek &= ~((1 << 6) -1);
943 // FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
944 // MMIO hole. If you fix this here, please fix amdk8, too.
945 /* Round the mmio hole to 64M */
946 mmio_basek &= ~((64*1024) - 1);
948 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
949 /* if the hw mem hole is already set in raminit stage, here we will compare
950 * mmio_basek and hole_basek. if mmio_basek is bigger that hole_basek and will
951 * use hole_basek as mmio_basek and we don't need to reset hole.
952 * otherwise We reset the hole to the mmio_basek
955 mem_hole = get_hw_mem_hole_info();
957 // Use hole_basek as mmio_basek, and we don't need to reset hole anymore
958 if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
959 mmio_basek = mem_hole.hole_startk;
963 #if CONFIG_AMDMCT == 0
964 //mmio_basek = 3*1024*1024; // for debug to meet boundary
967 if(mem_hole.node_id!=-1) {
968 /* We need to select CONFIG_HW_MEM_HOLE_SIZEK for raminit, it can not
969 make hole_startk to some basek too!
970 We need to reset our Mem Hole, because We want more big HOLE
972 Before that We need to disable mem hole at first, becase
973 memhole could already be set on i+1 instead
975 disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id);
978 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
979 // We need to double check if the mmio_basek is valid for hole
980 // setting, if it is equal to basek, we need to decrease it some
981 resource_t basek_pri;
982 for (i = 0; i < sysconf.nodes; i++) {
983 struct dram_base_mask_t d;
985 d = get_dram_base_mask(i);
987 if(!(d.mask &1)) continue;
989 basek = ((resource_t)(d.base & 0x1fffff00)) << 9;
990 if(mmio_basek == (u32)basek) {
/* FIX(review): was '(uin32_t)', a typo that cannot compile when this
 * CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC path is enabled; file uses u32. */
991 mmio_basek -= (u32)(basek - basek_pri); // increase mem hole size to make sure it is on middle of pri node
1004 for(i = 0; i < sysconf.nodes; i++) {
1005 struct dram_base_mask_t d;
1006 resource_t basek, limitk, sizek; // 4 1T
1007 d = get_dram_base_mask(i);
1009 if(!(d.mask & 1)) continue;
1010 basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lost 6 bit here
1011 limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
1012 sizek = limitk - basek;
1014 /* see if we need a hole from 0xa0000 to 0xbffff */
1015 if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
1016 ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
1018 basek = (8*64)+(16*16);
1019 sizek = limitk - ((8*64)+(16*16));
1023 // printk(BIOS_DEBUG, "node %d : mmio_basek=%08x, basek=%08x, limitk=%08x\n", i, mmio_basek, basek, limitk);
1025 /* split the region to accomodate pci memory space */
1026 if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
1027 if (basek <= mmio_basek) {
1029 pre_sizek = mmio_basek - basek;
1031 ram_resource(dev, (idx | i), basek, pre_sizek);
1034 #if CONFIG_WRITE_HIGH_TABLES==1
1035 if (high_tables_base==0) {
1036 /* Leave some space for ACPI, PIRQ and MP tables */
1037 #if CONFIG_GFXUMA == 1
1038 high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
1040 high_tables_base = (mmio_basek * 1024) - HIGH_MEMORY_SIZE;
1042 high_tables_size = HIGH_MEMORY_SIZE;
1043 printk(BIOS_DEBUG, " split: %dK table at =%08llx\n",
1044 HIGH_MEMORY_SIZE / 1024, high_tables_base);
1048 #if CONFIG_AMDMCT == 0
1049 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
1051 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_RAMTOP) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
1052 struct mem_info *meminfo;
1053 meminfo = &sysinfox->meminfo[i];
1054 sizek += hoist_memory(mmio_basek,i, get_one_DCT(meminfo), sysconf.nodes);
1061 if ((basek + sizek) <= 4*1024*1024) {
1065 basek = 4*1024*1024;
1066 sizek -= (4*1024*1024 - mmio_basek);
1070 #if CONFIG_GFXUMA == 1
1071 /* Deduct uma memory before reporting because
1072 * this is what the mtrr code expects */
1073 sizek -= uma_memory_size / 1024;
1075 ram_resource(dev, (idx | i), basek, sizek);
1077 #if CONFIG_WRITE_HIGH_TABLES==1
1078 printk(BIOS_DEBUG, "%d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
1079 i, mmio_basek, basek, limitk);
1080 if (high_tables_base==0) {
1081 /* Leave some space for ACPI, PIRQ and MP tables */
1082 #if CONFIG_GFXUMA == 1
1083 high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
1085 high_tables_base = (limitk * 1024) - HIGH_MEMORY_SIZE;
1087 high_tables_size = HIGH_MEMORY_SIZE;
1092 #if CONFIG_GFXUMA == 1
1093 add_uma_resource(dev, 7);
1096 for(link = dev->link_list; link; link = link->next) {
1097 if (link->children) {
1098 assign_resources(link);
/* scan_bus hook for the PCI domain: unmap every HT chain (and extended
 * config-map entries), scan the domain's links, then tune the HT
 * transaction control register per node for response pass-posted-write. */
1103 static u32 amdfam10_domain_scan_bus(device_t dev, u32 max)
1108 /* Unmap all of the HT chains */
1109 for(reg = 0xe0; reg <= 0xec; reg += 4) {
1110 f1_write_config32(reg, 0);
1112 #if CONFIG_EXT_CONF_SUPPORT == 1
/* Clear all 64 extended config-map index/data entries on every node. */
1114 for(i = 0; i< sysconf.nodes; i++) {
1116 for(index = 0; index < 64; index++) {
1117 pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
1118 pci_write_config32(__f1_dev[i], 0x114, 0);
1125 for(link = dev->link_list; link; link = link->next) {
1126 max = pci_scan_bus(link, PCI_DEVFN(CONFIG_CDB, 0), 0xff, max);
1129 /* Tune the hypertransport transaction for best performance.
1130 * Including enabling relaxed ordering if it is safe.
1133 for(i = 0; i < fx_devs; i++) {
1135 f0_dev = __f0_dev[i];
1136 if (f0_dev && f0_dev->enabled) {
1138 httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
1139 httc &= ~HTTC_RSP_PASS_PW;
1140 if (!dev->link_list->disable_relaxed_ordering) {
1141 httc |= HTTC_RSP_PASS_PW;
1143 printk(BIOS_SPEW, "%s passpw: %s\n",
1145 (!dev->link_list->disable_relaxed_ordering)?
1146 "enabled":"disabled");
1147 pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
/* device_operations for the PCI domain; config access mechanism depends
 * on whether MMCONF is the default. */
1153 static struct device_operations pci_domain_ops = {
1154 .read_resources = amdfam10_domain_read_resources,
1155 .set_resources = amdfam10_domain_set_resources,
1156 .enable_resources = NULL,
1158 .scan_bus = amdfam10_domain_scan_bus,
1159 #if CONFIG_MMCONF_SUPPORT_DEFAULT
1160 .ops_pci_bus = &pci_ops_mmconf,
1162 .ops_pci_bus = &pci_cf8_conf1,
/* Populate the global sysconf from the BSP's node-0 northbridge registers:
 * SB link, node count, APIC id layout and extended-APIC-id handling. */
1166 static void sysconf_init(device_t dev) // first node
1168 sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
1170 sysconf.ht_c_num = 0;
1172 unsigned ht_c_index;
1174 for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
1175 sysconf.ht_c_conf_bus[ht_c_index] = 0;
/* Node count: low 3 bits from reg 0x60, extended bits from 0x160. */
1178 sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1;
1179 #if CONFIG_MAX_PHYSICAL_CPUS > 8
1180 sysconf.nodes += (((pci_read_config32(dev, 0x160)>>4) & 7)<<3);
1183 sysconf.enabled_apic_ext_id = 0;
1184 sysconf.lift_bsp_apicid = 0;
1186 /* Find the bootstrap processors apicid */
1187 sysconf.bsp_apicid = lapicid();
1188 sysconf.apicid_offset = sysconf.bsp_apicid;
1190 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
1191 if (pci_read_config32(dev, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
1193 sysconf.enabled_apic_ext_id = 1;
1195 #if (CONFIG_APIC_ID_OFFSET>0)
1196 if(sysconf.enabled_apic_ext_id) {
1197 if(sysconf.bsp_apicid == 0) {
1198 /* bsp apic id is not changed */
1199 sysconf.apicid_offset = CONFIG_APIC_ID_OFFSET;
1201 sysconf.lift_bsp_apicid = 1;
/* Grow dev->link_list so the device exposes 'total_links' buses, appending
 * freshly zeroed struct bus entries numbered after the current last link.
 * Several interior lines of the list-splicing logic are missing here. */
1208 static void add_more_links(device_t dev, unsigned total_links)
1210 struct bus *link, *last = NULL;
1213 for (link = dev->link_list; link; link = link->next)
1217 int links = total_links - last->link_num;
1218 link_num = last->link_num;
1220 link = malloc(links*sizeof(*link));
1222 die("Couldn't allocate more links!\n");
1223 memset(link, 0, links*sizeof(*link));
/* No existing links: allocate the whole list from scratch. */
1229 link = malloc(total_links*sizeof(*link));
1230 memset(link, 0, total_links*sizeof(*link));
1231 dev->link_list = link;
1234 for (link_num = link_num + 1; link_num < total_links; link_num++) {
1235 link->link_num = link_num;
1237 link->next = link + 1;
1244 static u32 cpu_bus_scan(device_t dev, u32 max)
1246 struct bus *cpu_bus;
1249 device_t pci_domain;
1256 int disable_siblings;
1257 unsigned ApicIdCoreIdSize;
1260 ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
1261 if(ApicIdCoreIdSize) {
1262 siblings = (1<<ApicIdCoreIdSize)-1;
1264 siblings = 3; //quad core
1267 disable_siblings = !CONFIG_LOGICAL_CPUS;
1268 #if CONFIG_LOGICAL_CPUS == 1
1269 get_option(&disable_siblings, "multi_core");
1272 // How can I get the nb_cfg_54 of every node's nb_cfg_54 in bsp???
1273 nb_cfg_54 = read_nb_cfg_54();
1276 dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
1277 if(dev_mc && dev_mc->bus) {
1278 printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
1279 pci_domain = dev_mc->bus->dev;
1280 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1281 printk(BIOS_DEBUG, "\n%s move to ",dev_path(dev_mc));
1282 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1283 printk(BIOS_DEBUG, "%s",dev_path(dev_mc));
1286 printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
1288 printk(BIOS_DEBUG, "\n");
1290 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1292 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
1293 if (dev_mc && dev_mc->bus) {
1294 printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
1295 pci_domain = dev_mc->bus->dev;
1296 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1297 if((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
1298 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1299 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1300 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1302 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1303 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
1304 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1305 dev_mc = dev_mc->sibling;
1314 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1316 printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
1320 sysconf_init(dev_mc);
1322 nodes = sysconf.nodes;
1324 #if CONFIG_CBB && (NODE_NUMS > 32)
1325 if(nodes>32) { // need to put node 32 to node 63 to bus 0xfe
1326 if(pci_domain->link_list && !pci_domain->link_list->next) {
1327 struct bus *new_link = new_link(pci_domain);
1328 pci_domain->link_list->next = new_link;
1329 new_link->link_num = 1;
1330 new_link->dev = pci_domain;
1331 new_link->children = 0;
1332 printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
1334 pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
1337 /* Find which cpus are present */
1338 cpu_bus = dev->link_list;
1339 for(i = 0; i < nodes; i++) {
1340 device_t cdb_dev, cpu;
1341 struct device_path cpu_path;
1342 unsigned busn, devn;
1346 devn = CONFIG_CDB+i;
1348 #if CONFIG_CBB && (NODE_NUMS > 32)
1352 pbus = pci_domain->link_list->next);
1356 /* Find the cpu's pci device */
1357 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1359 /* If I am probing things in a weird order
1360 * ensure all of the cpu's pci devices are found.
1363 for(fn = 0; fn <= 5; fn++) { //FBDIMM?
1364 cdb_dev = pci_probe_dev(NULL, pbus,
1365 PCI_DEVFN(devn, fn));
1367 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1370 /* Ok, We need to set the links for that device.
1371 * otherwise the device under it will not be scanned
1374 #if CONFIG_HT3_SUPPORT==1
1379 add_more_links(cdb_dev, linknum);
1382 cores_found = 0; // one core
1383 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1384 if (cdb_dev && cdb_dev->enabled) {
1385 j = pci_read_config32(cdb_dev, 0xe8);
1386 cores_found = (j >> 12) & 3; // dev is func 3
1388 cores_found |= (j >> 13) & 4;
1389 printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(cdb_dev), cores_found);
1393 if(disable_siblings) {
1400 for (j = 0; j <=jj; j++ ) {
1402 /* Build the cpu device path */
1403 cpu_path.type = DEVICE_PATH_APIC;
1404 cpu_path.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:64); // ?
1406 /* See if I can find the cpu */
1407 cpu = find_dev_path(cpu_bus, &cpu_path);
1409 /* Enable the cpu if I have the processor */
1410 if (cdb_dev && cdb_dev->enabled) {
1412 cpu = alloc_dev(cpu_bus, &cpu_path);
1419 /* Disable the cpu if I don't have the processor */
1420 if (cpu && (!cdb_dev || !cdb_dev->enabled)) {
1424 /* Report what I have done */
1426 cpu->path.apic.node_id = i;
1427 cpu->path.apic.core_id = j;
1428 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET>0)
1429 if(sysconf.enabled_apic_ext_id) {
1430 if(sysconf.lift_bsp_apicid) {
1431 cpu->path.apic.apic_id += sysconf.apicid_offset;
1434 if (cpu->path.apic.apic_id != 0)
1435 cpu->path.apic.apic_id += sysconf.apicid_offset;
1439 printk(BIOS_DEBUG, "CPU: %s %s\n",
1440 dev_path(cpu), cpu->enabled?"enabled":"disabled");
/* Init hook for the APIC cluster: start up all CPUs hanging off this
 * device's link list, then run the SB CIMX post-PCI-init hook when that
 * southbridge support is compiled in. */
1448 static void cpu_bus_init(device_t dev)
1450 initialize_cpus(dev->link_list);
1451 #if CONFIG_AMD_SB_CIMX
1452 sb_After_Pci_Init();
/* Deliberate no-op, used where device_operations requires a callback
 * (enable_resources below) but the CPU cluster has nothing to do. */
1457 static void cpu_bus_noop(device_t dev)
/* read_resources hook for the CPU cluster (body elided in this chunk;
 * presumably registers the MMCONF window consumed in set_resources —
 * verify against the full file). */
1461 static void cpu_bus_read_resources(device_t dev)
/* Report the MMCONF resource (indexed by 0xc0010058 — the AMD MMIO
 * Config Base MSR number, reused as a resource index) and then fall
 * through to the generic PCI resource assignment. */
1465 static void cpu_bus_set_resources(device_t dev)
1467 struct resource *resource = find_resource(dev, 0xc0010058);
1469 report_resource_stored(dev, resource, " <mmconfig>");
1471 pci_dev_set_resources(dev);
/* device_operations for the APIC cluster: resources are read/reported,
 * enable is a no-op, init brings up the CPUs, and scan_bus enumerates
 * the per-node/per-core APIC devices. */
1474 static struct device_operations cpu_bus_ops = {
1475 .read_resources = cpu_bus_read_resources,
1476 .set_resources = cpu_bus_set_resources,
1477 .enable_resources = cpu_bus_noop,
1478 .init = cpu_bus_init,
1479 .scan_bus = cpu_bus_scan,
/* enable_dev entry point for this chip: attach the right ops based on
 * the device path type — PCI-domain devices get pci_domain_ops, APIC
 * clusters get cpu_bus_ops; anything else is left untouched. */
1482 static void root_complex_enable_dev(struct device *dev)
1484 /* Set the operations if it is a special bus type */
1485 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1486 dev->ops = &pci_domain_ops;
1488 else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1489 dev->ops = &cpu_bus_ops;
/* Public chip_operations: referenced by the devicetree machinery to hook
 * root_complex_enable_dev for every device of this chip. */
1493 struct chip_operations northbridge_amd_amdfam10_root_complex_ops = {
1494 CHIP_NAME("AMD FAM10 Root Complex")
1495 .enable_dev = root_complex_enable_dev,