2 * This file is part of the coreboot project.
4 * Copyright (C) 2007 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include <console/console.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <device/pci_ids.h>
26 #include <device/hypertransport.h>
32 #include <cpu/x86/lapic.h>
34 #if CONFIG_LOGICAL_CPUS==1
35 #include <cpu/amd/multicore.h>
36 #include <pc80/mc146818rtc.h>
40 #include "root_complex/chip.h"
41 #include "northbridge.h"
45 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
46 #include <cpu/amd/model_10xxx_rev.h>
49 #include <cpu/amd/amdfam10_sysconf.h>
50 #if CONFIG_SOUTHBRIDGE_AMD_CIMX_SB800
52 #elif CONFIG_SOUTHBRIDGE_AMD_CIMX_SB900
/* Global system configuration shared by the Fam10 northbridge code
 * (node count, HT chain bookkeeping, APIC id handling, ...). */
56 struct amdfam10_sysconf_t sysconf;
/* Cached PCI device handles for each node's HT config-space functions
 * 0, 1, 2 and 4 (device 0:0x18+node).  Filled in by get_fx_devs(). */
58 #define FX_DEVS NODE_NUMS
59 static device_t __f0_dev[FX_DEVS];
60 static device_t __f1_dev[FX_DEVS];
61 static device_t __f2_dev[FX_DEVS];
62 static device_t __f4_dev[FX_DEVS];
/* Number of nodes for which both function 0 and function 1 were found. */
63 static unsigned fx_devs=0;
/* Look up the PCI device for node 'nodeid', HT config function 'fn'.
 * Nodes are at devfn CONFIG_CDB+nodeid on bus CONFIG_CBB; nodes >= 32
 * are addressed on bus CONFIG_CBB-1.
 * NOTE(review): this file appears line-sampled; the #if/else structure
 * selecting between these returns is not fully visible here. */
65 device_t get_node_pci(u32 nodeid, u32 fn)
69 	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
71 	return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
75 	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
/* Populate the per-node __f0/__f1/__f2/__f4 device caches and count the
 * nodes that expose both function 0 and function 1.  Dies if node 0's
 * functions 0/1 are missing, since nothing else can work without them. */
79 static void get_fx_devs(void)
82 	for(i = 0; i < FX_DEVS; i++) {
83 		__f0_dev[i] = get_node_pci(i, 0);
84 		__f1_dev[i] = get_node_pci(i, 1);
85 		__f2_dev[i] = get_node_pci(i, 2);
86 		__f4_dev[i] = get_node_pci(i, 4);
87 		if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
90 	if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
91 		die("Cannot find 0:0x18.[0|1]\n");
/* Read a function-1 (address map) config register from node 0. */
95 static u32 f1_read_config32(unsigned reg)
99 	return pci_read_config32(__f1_dev[0], reg);
/* Broadcast a function-1 config write to every enabled node, so the
 * address-map registers stay identical across the fabric. */
102 static void f1_write_config32(unsigned reg, u32 value)
107 	for(i = 0; i < fx_devs; i++) {
110 		if (dev && dev->enabled) {
111 			pci_write_config32(dev, reg, value);
/* Derive the node id from a northbridge PCI device: devfn encodes
 * CONFIG_CDB+node.  Devices not on bus CONFIG_CBB belong to nodes 32+. */
116 static u32 amdfam10_nodeid(device_t dev)
120 	busn = dev->bus->secondary;
121 	if(busn != CONFIG_CBB) {
122 		return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
124 	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
128 	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
/* Program F1x0F4 (VGA Enable) so legacy VGA ranges are routed to the
 * given node/link. */
134 static void set_vga_enable_reg(u32 nodeid, u32 linkn)
138 	val = 1 | (nodeid<<4) | (linkn<<12);
139 	/* it will routing (1)mmio 0xa0000:0xbffff (2) io 0x3b0:0x3bb,
141 	f1_write_config32(0xf4, val);
/* Scan one HyperTransport chain hanging off node 'nodeid', link 'link_num':
 * wait for the link to come up, allocate a config-map register (ht_c_index),
 * assign the secondary/subordinate bus range, scan the chain, then shrink
 * the subordinate bus to the real value and record the chain in sysconf.
 * Returns the updated highest bus number 'max'.
 * NOTE(review): the file is line-sampled here; loop/brace structure between
 * the visible statements is not fully shown. */
145 static u32 amdfam10_scan_chain(device_t dev, u32 nodeid, struct bus *link, u32 link_num, u32 sblink,
146 	u32 max, u32 offset_unitid)
148 	// I want to put sb chain in bus 0 can I?
154 	u32 ht_unitid_base[4]; // here assume only 4 HT device on chain
157 	u32 is_sublink1 = (link_num>3);
161 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
166 #if CONFIG_HT3_SUPPORT==1
170 		regpos = 0x170 + 4 * (link_num&3); // it is only on sublink0
171 		reg = pci_read_config32(dev, regpos);
172 		if(reg & 1) return max; // already ganged no sblink1
173 		devx = get_node_pci(nodeid, 4);
	/* Link capability block: 0x20 bytes per link starting at 0x80. */
179 	link->cap = 0x80 + ((link_num&3) *0x20);
181 		link_type = pci_read_config32(devx, link->cap + 0x18);
182 	} while(link_type & ConnectionPending);
183 	if (!(link_type & LinkConnected)) {
187 		link_type = pci_read_config32(devx, link->cap + 0x18);
188 	} while(!(link_type & InitComplete));
	/* Only non-coherent links lead to I/O chains we should scan. */
189 	if (!(link_type & NonCoherent)) {
192 	/* See if there is an available configuration space mapping
193 	 * register in function 1.
195 	ht_c_index = get_ht_c_index(nodeid, link_num, &sysconf);
197 #if CONFIG_EXT_CONF_SUPPORT == 0
198 	if(ht_c_index>=4) return max;
201 	/* Set up the primary, secondary and subordinate bus numbers.
202 	 * We have no idea how many busses are behind this bridge yet,
203 	 * so we set the subordinate bus number to 0xff for the moment.
205 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
206 	// first chain will on bus 0
207 	if((nodeid == 0) && (sblink==link_num)) { // actually max is 0 here
210 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
211 	// second chain will be on 0x40, third 0x80, forth 0xc0
212 	// i would refined that to 2, 3, 4 ==> 0, 0x, 40, 0x80, 0xc0
213 	// >4 will use more segments, We can have 16 segmment and every segment have 256 bus, For that case need the kernel support mmio pci config.
215 		min_bus = ((busn>>3) + 1) << 3; // one node can have 8 link and segn is the same
217 	max = min_bus | (segn<<8);
227 	max_bus = 0xfc | (segn<<8);
229 	link->secondary = min_bus;
230 	link->subordinate = max_bus;
232 	/* Read the existing primary/secondary/subordinate bus
233 	 * number configuration.
235 	busses = pci_read_config32(devx, link->cap + 0x14);
237 	/* Configure the bus numbers for this bridge: the configuration
238 	 * transactions will not be propagates by the bridge if it is
239 	 * not correctly configured
241 	busses &= 0xffff00ff;
242 	busses |= ((u32)(link->secondary) << 8);
243 	pci_write_config32(devx, link->cap + 0x14, busses);
246 	/* set the config map space */
248 	set_config_map_reg(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, sysconf.segbit, sysconf.nodes);
250 	/* Now we can scan all of the subordinate busses i.e. the
251 	 * chain on the hypertranport link
254 		ht_unitid_base[i] = 0x20;
257 	//if ext conf is enabled, only need use 0x1f
259 		max_devfn = (0x17<<3) | 7;
261 		max_devfn = (0x1f<<3) | 7;
263 	max = hypertransport_scan_chain(link, 0, max_devfn, max, ht_unitid_base, offset_unitid);
265 	/* We know the number of busses behind this bridge. Set the
266 	 * subordinate bus number to it's real value
268 	if(ht_c_index>3) { // clear the extend reg
269 		clear_config_map_reg(nodeid, link_num, ht_c_index, (max+1)>>sysconf.segbit, (link->subordinate)>>sysconf.segbit, sysconf.nodes);
272 	link->subordinate = max;
273 	set_config_map_reg(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, sysconf.segbit, sysconf.nodes);
277 	// use ht_unitid_base to update hcdn_reg
280 		temp |= (ht_unitid_base[i] & 0xff) << (i*8);
283 	sysconf.hcdn_reg[ht_c_index] = temp;
286 	store_ht_c_conf_bus(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, &sysconf);
/* scan_bus hook for a node: scan the southbridge chain first (so it lands
 * on bus 0 when CONFIG_SB_HT_CHAIN_ON_BUS0 is set), then every other link. */
290 static unsigned amdfam10_scan_chains(device_t dev, unsigned max)
294 	unsigned sblink = sysconf.sblk;
295 	unsigned offset_unitid = 0;
297 	nodeid = amdfam10_nodeid(dev);
299 	// Put sb chain in bus 0
300 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
302 #if ((CONFIG_HT_CHAIN_UNITID_BASE != 1) || (CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20))
305 		for (link = dev->link_list; link; link = link->next)
306 			if (link->link_num == sblink)
307 		max = amdfam10_scan_chain(dev, nodeid, link, sblink, sblink, max, offset_unitid ); // do sb ht chain at first, in case s2885 put sb chain (8131/8111) on link2, but put 8151 on link0
311 #if CONFIG_PCI_BUS_SEGN_BITS
312 	max = check_segn(dev, max, sysconf.nodes, &sysconf);
	/* Scan the remaining links, skipping the already-done SB chain. */
315 	for(link = dev->link_list; link; link = link->next) {
316 #if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
317 		if( (nodeid == 0) && (sblink == link->link_num) ) continue; //already done
320 #if ((CONFIG_HT_CHAIN_UNITID_BASE != 1) || (CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20))
321 #if CONFIG_SB_HT_CHAIN_UNITID_OFFSET_ONLY == 1
322 		if((nodeid == 0) && (sblink == link->link_num))
327 		max = amdfam10_scan_chain(dev, nodeid, link, link->link_num, sblink, max, offset_unitid);
/* Check whether function-1 mapping register pair 'reg' is free or already
 * owned by goal_dev/goal_nodeid/goal_link.  Walks every node and link
 * looking for a resource registered under IOINDEX(0x1000+reg, link).
 * Return value encodes: allocated-to-me, free, or in-use-by-other
 * (the exact codes are in lines not visible in this sampled view). */
333 static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
336 	struct resource *res;
337 	unsigned nodeid, link = 0;
340 	for(nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
342 		dev = __f0_dev[nodeid];
345 		for(link = 0; !res && (link < 8); link++) {
346 			res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
	/* link/nodeid were post-incremented by the loops, hence the -1. */
352 		if (	(goal_link == (link - 1)) &&
353 			(goal_nodeid == (nodeid - 1)) &&
/* Find (or reuse) a function-1 I/O base/limit register pair for this
 * node/link and return a new resource keyed on it.  With extended config
 * space there is always an indexed register available. */
361 static struct resource *amdfam10_find_iopair(device_t dev, unsigned nodeid, unsigned link)
363 	struct resource *resource;
	/* I/O pairs live at F1xC0..F1xD8, stepping by 8 (base+limit). */
367 	for(reg = 0xc0; reg <= 0xd8; reg += 0x8) {
369 		result = reg_useable(reg, dev, nodeid, link);
371 			/* I have been allocated this one */
374 		else if (result > 1) {
375 			/* I have a free register pair */
380 		reg = free_reg; // if no free, the free_reg still be 0
385 		//because of Extend conf space, we will never run out of reg, but we need one index to differ them. so same node and same link can have multi range
386 		u32 index = get_io_addr_index(nodeid, link);
387 		reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255
390 	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/* Find (or reuse) a function-1 MMIO base/limit register pair for this
 * node/link and return a new resource keyed on it.  Mirrors
 * amdfam10_find_iopair() but for the MMIO map at F1x80..F1xB8. */
395 static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
397 	struct resource *resource;
401 	for(reg = 0x80; reg <= 0xb8; reg += 0x8) {
403 		result = reg_useable(reg, dev, nodeid, link);
405 			/* I have been allocated this one */
408 		else if (result > 1) {
409 			/* I have a free register pair */
419 		//because of Extend conf space, we will never run out of reg,
420 		// but we need one index to differ them. so same node and
421 		// same link can have multi range
422 		u32 index = get_mmio_addr_index(nodeid, link);
423 		reg = 0x110+ (index<<24) + (6<<20); // index could be 0, 63
426 	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/* Create the bridge-level I/O, prefetchable-memory and memory resource
 * constraints for one HT link, so the allocator can size windows below. */
431 static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
433 	struct resource *resource;
435 	/* Initialize the io space constraints on the current bus */
436 	resource = amdfam10_find_iopair(dev, nodeid, link);
439 #if CONFIG_EXT_CONF_SUPPORT == 1
440 		if((resource->index & 0x1fff) == 0x1110) { // ext
445 		align = log2(HT_IO_HOST_ALIGN);
448 		resource->align = align;
449 		resource->gran  = align;
450 		resource->limit = 0xffffUL;
451 		resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
454 	/* Initialize the prefetchable memory constraints on the current bus */
455 	resource = amdfam10_find_mempair(dev, nodeid, link);
459 		resource->align = log2(HT_MEM_HOST_ALIGN);
460 		resource->gran  = log2(HT_MEM_HOST_ALIGN);
		/* 40-bit physical address limit on Fam10. */
461 		resource->limit = 0xffffffffffULL;
462 		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
463 		resource->flags |= IORESOURCE_BRIDGE;
465 #if CONFIG_EXT_CONF_SUPPORT == 1
466 		if((resource->index & 0x1fff) == 0x1110) { // ext
467 			normalize_resource(resource);
473 	/* Initialize the memory constraints on the current bus */
474 	resource = amdfam10_find_mempair(dev, nodeid, link);
478 		resource->align = log2(HT_MEM_HOST_ALIGN);
479 		resource->gran  = log2(HT_MEM_HOST_ALIGN);
480 		resource->limit = 0xffffffffffULL;
481 		resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
482 #if CONFIG_EXT_CONF_SUPPORT == 1
483 		if((resource->index & 0x1fff) == 0x1110) { // ext
484 			normalize_resource(resource);
/* read_resources hook for a node: create window constraints for every
 * link that actually has children behind it. */
490 static void amdfam10_read_resources(device_t dev)
494 	nodeid = amdfam10_nodeid(dev);
495 	for(link = dev->link_list; link; link = link->next) {
496 		if (link->children) {
497 			amdfam10_link_read_bases(dev, nodeid, link->link_num);
/* Program one assigned I/O or MMIO window resource into the function-1
 * mapping registers (and record it for later table generation). */
502 static void amdfam10_set_resource(device_t dev, struct resource *resource,
505 	resource_t rbase, rend;
506 	unsigned reg, link_num;
509 	/* Make certain the resource has actually been set */
510 	if (!(resource->flags & IORESOURCE_ASSIGNED)) {
514 	/* If I have already stored this resource don't worry about it */
515 	if (resource->flags & IORESOURCE_STORED) {
519 	/* Only handle PCI memory and IO resources */
520 	if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
523 	/* Ensure I am actually looking at a resource of function 1 */
524 	if ((resource->index & 0xffff) < 0x1000) {
527 	/* Get the base address */
528 	rbase = resource->base;
530 	/* Get the limit (rounded up) */
531 	rend  = resource_end(resource);
533 	/* Get the register and link */
534 	reg  = resource->index & 0xfff; // 4k
535 	link_num = IOINDEX_LINK(resource->index);
537 	if (resource->flags & IORESOURCE_IO) {
		/* Registers hold bits [39:8]/[24:16]; shift accordingly. */
539 		set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
540 		store_conf_io_addr(nodeid, link_num, reg, (resource->index >> 24), rbase>>8, rend>>8);
542 	else if (resource->flags & IORESOURCE_MEM) {
543 		set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
544 		store_conf_mmio_addr(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8);
546 	resource->flags |= IORESOURCE_STORED;
547 	sprintf(buf, " <node %x link %x>",
549 	report_resource_stored(dev, resource, buf);
553 * I tried to reuse the resource allocation code in amdfam10_set_resource()
554 * but it is too difficult to deal with the resource allocation magic.
/* Locate the link that leads to the (first) VGA adapter and route legacy
 * VGA ranges to it via set_vga_enable_reg().  Does nothing if no link
 * has PCI_BRIDGE_CTL_VGA set. */
557 static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
561 	/* find out which link the VGA card is connected,
562 	 * we only deal with the 'first' vga card */
563 	for (link = dev->link_list; link; link = link->next) {
564 		if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
565 #if CONFIG_MULTIPLE_VGA_ADAPTERS == 1
566 			extern device_t vga_pri; // the primary vga device, defined in device.c
567 			printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
568 				link->secondary,link->subordinate);
569 			/* We need to make sure the vga_pri is under the link */
570 			if((vga_pri->bus->secondary >= link->secondary ) &&
571 				(vga_pri->bus->secondary <= link->subordinate )
578 	/* no VGA card installed */
582 	printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link->link_num);
583 	set_vga_enable_reg(nodeid, link->link_num);
/* set_resources hook for a node: route VGA, program every window resource
 * into function 1, then recurse into child buses. */
586 static void amdfam10_set_resources(device_t dev)
590 	struct resource *res;
592 	/* Find the nodeid */
593 	nodeid = amdfam10_nodeid(dev);
595 	amdfam10_create_vga_resource(dev, nodeid);
597 	/* Set each resource we have found */
598 	for(res = dev->resource_list; res; res = res->next) {
599 		amdfam10_set_resource(dev, res, nodeid);
602 	for(bus = dev->link_list; bus; bus = bus->next) {
604 			assign_resources(bus);
/* init hook for the HT-config (function 0) device.
 * NOTE(review): body not visible in this sampled view — presumably empty
 * or minimal; confirm against the full file. */
609 static void mcf0_control_init(struct device *dev)
/* Device operations for each node's HT-config function 0. */
613 static struct device_operations northbridge_operations = {
614 	.read_resources	  = amdfam10_read_resources,
615 	.set_resources	  = amdfam10_set_resources,
616 	.enable_resources = pci_dev_enable_resources,
617 	.init		  = mcf0_control_init,
618 	.scan_bus	  = amdfam10_scan_chains,
/* Bind northbridge_operations to the AMD HT-config PCI function. */
624 static const struct pci_driver mcf0_driver __pci_driver = {
625 	.ops	= &northbridge_operations,
626 	.vendor = PCI_VENDOR_ID_AMD,
/* Chip operations entry referenced from the devicetree. */
630 struct chip_operations northbridge_amd_amdfam10_ops = {
631 	CHIP_NAME("AMD FAM10 Northbridge")
/* read_resources hook for the PCI domain: reserve already-programmed
 * function-1 window registers, then create the system-wide I/O and memory
 * constraints (and the MMCONF reservation when enabled). */
635 static void amdfam10_domain_read_resources(device_t dev)
639 	/* Find the already assigned resource pairs */
641 	for(reg = 0x80; reg <= 0xd8; reg+= 0x08) {
643 		base  = f1_read_config32(reg);
644 		limit = f1_read_config32(reg + 0x04);
645 		/* Is this register allocated? */
646 		if ((base & 3) != 0) {
647 			unsigned nodeid, reg_link;
			/* Destination node/link are encoded in the limit reg;
			 * MMIO (reg < 0xc0) and I/O pack the high node bits
			 * differently. */
649 			if(reg<0xc0) { // mmio
650 				nodeid = (limit & 0xf) + (base&0x30);
652 				nodeid = (limit & 0xf) + ((base>>4)&0x30);
654 			reg_link = (limit >> 4) & 7;
655 			reg_dev = __f0_dev[nodeid];
657 				/* Reserve the resource  */
658 				struct resource *res;
659 				res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
666 	/* FIXME: do we need to check extend conf space?
667 	   I don't believe that much preset value */
669 #if CONFIG_PCI_64BIT_PREF_MEM == 0
670 	pci_domain_read_resources(dev);
673 	struct resource *resource;
674 	for(link=dev->link_list; link; link = link->next) {
675 		/* Initialize the system wide io space constraints */
676 		resource = new_resource(dev, 0|(link->link_num<<2));
		/* Leave 0x000-0x3ff for legacy ISA devices. */
677 		resource->base	= 0x400;
678 		resource->limit = 0xffffUL;
679 		resource->flags = IORESOURCE_IO;
681 		/* Initialize the system wide prefetchable memory resources constraints */
682 		resource = new_resource(dev, 1|(link->link_num<<2));
683 		resource->limit = 0xfcffffffffULL;
684 		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
686 		/* Initialize the system wide memory resources constraints */
687 		resource = new_resource(dev, 2|(link->link_num<<2));
688 		resource->limit = 0xfcffffffffULL;
689 		resource->flags = IORESOURCE_MEM;
692 #if CONFIG_MMCONF_SUPPORT
	/* Reserve the MMCONF aperture (index = MSR C001_0058, MMIO Config Base). */
693 	struct resource *res = new_resource(dev, 0xc0010058);
694 	res->base = CONFIG_MMCONF_BASE_ADDRESS;
695 	res->size = CONFIG_MMCONF_BUS_NUMBER * 4096*256;
696 	res->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
697 		IORESOURCE_FIXED | IORESOURCE_STORED |	IORESOURCE_ASSIGNED;
/* Lower 'tolm' (top of low memory) to the base of the lowest MMIO
 * resource found on this bus, if any. */
701 static u32 my_find_pci_tolm(struct bus *bus, u32 tolm)
703 	struct resource *min;
705 	search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
706 	if (min && tolm > min->base) {
712 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* Where (in KiB) the hardware memory hole starts, and which node owns it
 * (-1 when no hole was programmed). */
714 struct hw_mem_hole_info {
715 	unsigned hole_startk;
/* Discover the hardware memory hole: first check each node's F1xF0
 * (DRAM Hole Address) register; if none is set, look for a gap between
 * consecutive nodes' DRAM base/limit ranges below 4G. */
719 static struct hw_mem_hole_info get_hw_mem_hole_info(void)
721 	struct hw_mem_hole_info mem_hole;
724 	mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
725 	mem_hole.node_id = -1;
727 	for (i = 0; i < sysconf.nodes; i++) {
728 		struct dram_base_mask_t d;
730 		d = get_dram_base_mask(i);
731 		if(!(d.mask & 1)) continue; // no memory on this node
733 		hole = pci_read_config32(__f1_dev[i], 0xf0);
734 		if(hole & 1) { // we find the hole
			/* F1xF0[31:24] is the hole start in 16MB units ⇒ >>10 gives KiB. */
735 			mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
736 			mem_hole.node_id = i; // record the node No with hole
737 			break; // only one hole
741 	//We need to double check if there is speical set on base reg and limit reg are not continous instead of hole, it will find out it's hole_startk
742 	if(mem_hole.node_id==-1) {
743 		resource_t limitk_pri = 0;
744 		for(i=0; i<sysconf.nodes; i++) {
745 			struct dram_base_mask_t d;
746 			resource_t base_k, limit_k;
747 			d = get_dram_base_mask(i);
748 			if(!(d.base & 1)) continue;
750 			base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
751 			if(base_k > 4 *1024 * 1024) break; // don't need to go to check
752 			if(limitk_pri != base_k) { // we find the hole
753 				mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
754 				mem_hole.node_id = i;
755 				break; //only one hole
758 			limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
759 			limitk_pri = limit_k;
765 // WHY this check? CONFIG_AMDMCT is enabled on all Fam10 boards.
766 // Does it make sense not to?
767 #if CONFIG_AMDMCT == 0
/* Undo memory hoisting: shift every node above 'node_id' down by the hole
 * size, clear the hole (F1xF0) on the owning node, and adjust the DCT
 * select base/offset registers to match the new addresses. */
768 static void disable_hoist_memory(unsigned long hole_startk, int node_id)
772 	struct dram_base_mask_t d;
	/* sysinfo from raminit lives at the top of CAR-migrated RAM. */
779 	struct sys_info *sysinfox = (struct sys_info *)((CONFIG_RAMTOP) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
780 	struct mem_info *meminfo;
781 	meminfo = &sysinfox->meminfo[node_id];
783 	one_DCT = get_one_DCT(meminfo);
785 	// 1. find which node has hole
786 	// 2. change limit in that node.
787 	// 3. change base and limit in later node
788 	// 4. clear that node f0
790 	// if there is not mem hole enabled, we need to change it's base instead
792 	hole_sizek = (4*1024*1024) - hole_startk;
	/* Shift all higher nodes down by the hole size (units of 512K ⇒ >>9). */
794 	for(i=NODE_NUMS-1;i>node_id;i--) {
796 		d = get_dram_base_mask(i);
798 		if(!(d.mask & 1)) continue;
800 		d.base -= (hole_sizek>>9);
801 		d.mask -= (hole_sizek>>9);
802 		set_dram_base_mask(i, d, sysconf.nodes);
804 		if(get_DctSelHiEn(i) & 1) {
805 			sel_m = get_DctSelBaseAddr(i);
806 			sel_m -= hole_startk>>10;
807 			set_DctSelBaseAddr(i, sel_m);
	/* Now fix up the node that owned the hole. */
811 	d = get_dram_base_mask(node_id);
812 	dev = __f1_dev[node_id];
813 	sel_hi_en = get_DctSelHiEn(node_id);
816 		sel_m = get_DctSelBaseAddr(node_id);
818 	hoist = pci_read_config32(dev, 0xf0);
820 		pci_write_config32(dev, 0xf0, 0);
821 		d.mask -= (hole_sizek>>9);
822 		set_dram_base_mask(node_id, d, sysconf.nodes);
823 		if(one_DCT || (sel_m >= (hole_startk>>10))) {
825 			sel_m -= hole_startk>>10;
826 			set_DctSelBaseAddr(node_id, sel_m);
830 			set_DctSelBaseOffset(node_id, 0);
833 		d.base -= (hole_sizek>>9);
834 		d.mask -= (hole_sizek>>9);
835 		set_dram_base_mask(node_id, d, sysconf.nodes);
838 			sel_m -= hole_startk>>10;
839 			set_DctSelBaseAddr(node_id, sel_m);
848 #if CONFIG_WRITE_HIGH_TABLES==1
852 #if CONFIG_GFXUMA == 1
/* UMA framebuffer range, set up by earlier (raminit/GFX) code. */
853 extern uint64_t uma_memory_base, uma_memory_size;
/* Reserve the UMA graphics memory so the allocator and tables treat it
 * as fixed, already-assigned memory. */
855 static void add_uma_resource(struct device *dev, int index)
857 	struct resource *resource;
859 	printk(BIOS_DEBUG, "Adding UMA memory area\n");
860 	resource = new_resource(dev, index);
861 	resource->base = (resource_t) uma_memory_base;
862 	resource->size = (resource_t) uma_memory_size;
863 	resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
864 		IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
/* set_resources hook for the PCI domain: determine the PCI TOLM / MMIO
 * hole, report each node's DRAM as ram_resource()s (splitting around the
 * VGA hole and the MMIO hole), place high tables and UMA, then assign
 * child resources.
 * Review fixes in this revision:
 *  - "(uin32_t)" typo corrected to "(u32)" in the HW_MEM_HOLE_SIZE_AUTO_INC
 *    path (would not compile when that option is enabled).
 *  - "res = &dev->resource_list" corrected to "res = dev->resource_list"
 *    in the PCI_64BIT_PREF_MEM path (pointer/pointer-to-pointer mismatch
 *    when walking the resource linked list). */
868 static void amdfam10_domain_set_resources(device_t dev)
870 #if CONFIG_PCI_64BIT_PREF_MEM == 1
871 	struct resource *io, *mem1, *mem2;
872 	struct resource *res;
874 	unsigned long mmio_basek;
878 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
879 	struct hw_mem_hole_info mem_hole;
880 	u32 reset_memhole = 1;
883 #if CONFIG_PCI_64BIT_PREF_MEM == 1
885 	for(link = dev->link_list; link; link = link->next) {
886 		/* Now reallocate the pci resources memory with the
887 		 * highest addresses I can manage.
889 		mem1 = find_resource(dev, 1|(link->link_num<<2));
890 		mem2 = find_resource(dev, 2|(link->link_num<<2));
892 		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
893 			mem1->base, mem1->limit, mem1->size, mem1->align);
894 		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
895 			mem2->base, mem2->limit, mem2->size, mem2->align);
897 		/* See if both resources have roughly the same limits */
898 		if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
899 			((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
901 			/* If so place the one with the most stringent alignment first
903 			if (mem2->align > mem1->align) {
904 				struct resource *tmp;
909 			/* Now place the memory as high up as it will go */
910 			mem2->base = resource_max(mem2);
911 			mem1->limit = mem2->base - 1;
912 			mem1->base = resource_max(mem1);
915 			/* Place the resources as high up as they will go */
916 			mem2->base = resource_max(mem2);
917 			mem1->base = resource_max(mem1);
920 		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
921 			mem1->base, mem1->limit, mem1->size, mem1->align);
922 		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
923 			mem2->base, mem2->limit, mem2->size, mem2->align);
	/* Walk the domain's resource list (fixed: was &dev->resource_list). */
926 	for(res = dev->resource_list; res; res = res->next)
928 			res->flags |= IORESOURCE_ASSIGNED;
929 			res->flags |= IORESOURCE_STORED;
930 			report_resource_stored(dev, res, "");
934 	pci_tolm = 0xffffffffUL;
935 	for(link = dev->link_list; link; link = link->next) {
936 		pci_tolm = my_find_pci_tolm(link, pci_tolm);
939 	// FIXME handle interleaved nodes. If you fix this here, please fix
941 	mmio_basek = pci_tolm >> 10;
942 	/* Round mmio_basek to something the processor can support */
943 	mmio_basek &= ~((1 << 6) -1);
945 	// FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
946 	// MMIO hole. If you fix this here, please fix amdk8, too.
947 	/* Round the mmio hole to 64M */
948 	mmio_basek &= ~((64*1024) - 1);
950 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
951 	/* if the hw mem hole is already set in raminit stage, here we will compare
952 	 * mmio_basek and hole_basek. if mmio_basek is bigger that hole_basek and will
953 	 * use hole_basek as mmio_basek and we don't need to reset hole.
954 	 * otherwise We reset the hole to the mmio_basek
957 	mem_hole = get_hw_mem_hole_info();
959 	// Use hole_basek as mmio_basek, and we don't need to reset hole anymore
960 	if ((mem_hole.node_id !=  -1) && (mmio_basek > mem_hole.hole_startk)) {
961 		mmio_basek = mem_hole.hole_startk;
965 #if CONFIG_AMDMCT == 0
966 	//mmio_basek = 3*1024*1024; // for debug to meet boundary
969 		if(mem_hole.node_id!=-1) {
970 			/* We need to select CONFIG_HW_MEM_HOLE_SIZEK for raminit, it can not
971 			    make hole_startk to some basek too!
972 			   We need to reset our Mem Hole, because We want more big HOLE
974 			   Before that We need to disable mem hole at first, becase
975 			   memhole could already be set on i+1 instead
977 			disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id);
980 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
981 		// We need to double check if the mmio_basek is valid for hole
982 		// setting, if it is equal to basek, we need to decrease it some
983 		resource_t basek_pri;
984 		for (i = 0; i < sysconf.nodes; i++) {
985 			struct dram_base_mask_t d;
987 			d = get_dram_base_mask(i);
989 			if(!(d.mask &1)) continue;
991 			basek = ((resource_t)(d.base & 0x1fffff00)) << 9;
992 			if(mmio_basek == (u32)basek) {
				/* Fixed: was the non-existent type "uin32_t". */
993 				mmio_basek -= (u32)(basek - basek_pri); // increase mem hole size to make sure it is on middle of pri node
	/* Report each node's DRAM range as RAM, carving out the VGA legacy
	 * hole (0xa0000-0xbffff) and splitting around the MMIO hole. */
1006 	for(i = 0; i < sysconf.nodes; i++) {
1007 		struct dram_base_mask_t d;
1008 		resource_t basek, limitk, sizek; // 4 1T
1009 		d = get_dram_base_mask(i);
1011 		if(!(d.mask & 1)) continue;
1012 		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lost 6 bit here
1013 		limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
1014 		sizek = limitk - basek;
1016 		/* see if we need a hole from 0xa0000 to 0xbffff */
1017 		if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
1018 			ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
1020 			basek = (8*64)+(16*16);
1021 			sizek = limitk - ((8*64)+(16*16));
1025 		// printk(BIOS_DEBUG, "node %d : mmio_basek=%08x, basek=%08x, limitk=%08x\n", i, mmio_basek, basek, limitk);
1027 		/* split the region to accomodate pci memory space */
1028 		if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
1029 			if (basek <= mmio_basek) {
1031 				pre_sizek = mmio_basek - basek;
1033 					ram_resource(dev, (idx | i), basek, pre_sizek);
1036 #if CONFIG_WRITE_HIGH_TABLES==1
1037 					if (high_tables_base==0) {
1038 					/* Leave some space for ACPI, PIRQ and MP tables */
1039 #if CONFIG_GFXUMA == 1
1040 						high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
1042 						high_tables_base = (mmio_basek * 1024) - HIGH_MEMORY_SIZE;
1044 						high_tables_size = HIGH_MEMORY_SIZE;
1045 						printk(BIOS_DEBUG, " split: %dK table at =%08llx\n",
1046 							HIGH_MEMORY_SIZE / 1024, high_tables_base);
1050 #if CONFIG_AMDMCT == 0
1051 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
1053 					struct sys_info *sysinfox = (struct sys_info *)((CONFIG_RAMTOP) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
1054 					struct mem_info *meminfo;
1055 					meminfo = &sysinfox->meminfo[i];
1056 					sizek += hoist_memory(mmio_basek,i, get_one_DCT(meminfo), sysconf.nodes);
1063 				if ((basek + sizek) <= 4*1024*1024) {
1067 					basek = 4*1024*1024;
1068 					sizek -= (4*1024*1024 - mmio_basek);
1072 #if CONFIG_GFXUMA == 1
1073 		/* Deduct uma memory before reporting because
1074 		 * this is what the mtrr code expects */
1075 		sizek -= uma_memory_size / 1024;
1077 		ram_resource(dev, (idx | i), basek, sizek);
1079 #if CONFIG_WRITE_HIGH_TABLES==1
1080 		printk(BIOS_DEBUG, "%d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
1081 			 i, mmio_basek, basek, limitk);
1082 		if (high_tables_base==0) {
1083 		/* Leave some space for ACPI, PIRQ and MP tables */
1084 #if CONFIG_GFXUMA == 1
1085 			high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
1087 			high_tables_base = (limitk * 1024) - HIGH_MEMORY_SIZE;
1089 			high_tables_size = HIGH_MEMORY_SIZE;
1094 #if CONFIG_GFXUMA == 1
1095 	add_uma_resource(dev, 7);
1098 	for(link = dev->link_list; link; link = link->next) {
1099 		if (link->children) {
1100 			assign_resources(link);
/* scan_bus hook for the PCI domain: unmap all HT chain config-map
 * registers (F1xE0..F1xEC and the extended index map), scan the buses,
 * then tune HT transaction control (response PassPW) per node. */
1105 static u32 amdfam10_domain_scan_bus(device_t dev, u32 max)
1110 	/* Unmap all of the HT chains */
1111 	for(reg = 0xe0; reg <= 0xec; reg += 4) {
1112 		f1_write_config32(reg, 0);
1114 #if CONFIG_EXT_CONF_SUPPORT == 1
	/* Also clear all 64 extended config-map entries on every node
	 * (index written to F1x110, data to F1x114). */
1116 	for(i = 0; i< sysconf.nodes; i++) {
1118 		for(index = 0; index < 64; index++) {
1119 			pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
1120 			pci_write_config32(__f1_dev[i], 0x114, 0);
1127 	for(link = dev->link_list; link; link = link->next) {
1128 		max = pci_scan_bus(link, PCI_DEVFN(CONFIG_CDB, 0), 0xff, max);
1131 	/* Tune the hypertransport transaction for best performance.
1132 	 * Including enabling relaxed ordering if it is safe.
1135 	for(i = 0; i < fx_devs; i++) {
1137 		f0_dev = __f0_dev[i];
1138 		if (f0_dev && f0_dev->enabled) {
1140 			httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
1141 			httc &= ~HTTC_RSP_PASS_PW;
1142 			if (!dev->link_list->disable_relaxed_ordering) {
1143 				httc |= HTTC_RSP_PASS_PW;
1145 			printk(BIOS_SPEW, "%s passpw: %s\n",
1147 				(!dev->link_list->disable_relaxed_ordering)?
1148 				"enabled":"disabled");
1149 			pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
/* Device operations for the PCI domain; config access method depends on
 * whether MMCONF is the default mechanism. */
1155 static struct device_operations pci_domain_ops = {
1156 	.read_resources	  = amdfam10_domain_read_resources,
1157 	.set_resources	  = amdfam10_domain_set_resources,
1158 	.enable_resources = NULL,
1160 	.scan_bus	  = amdfam10_domain_scan_bus,
1161 #if CONFIG_MMCONF_SUPPORT_DEFAULT
1162 	.ops_pci_bus	  = &pci_ops_mmconf,
1164 	.ops_pci_bus	  = &pci_cf8_conf1,
/* Initialize the global sysconf from node 0's config space: SB link,
 * node count, BSP APIC id, and (optionally) extended APIC id offsets. */
1168 static void sysconf_init(device_t dev) // first node
1170 	sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
1172 	sysconf.ht_c_num = 0;
1174 	unsigned ht_c_index;
1176 	for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
1177 		sysconf.ht_c_conf_bus[ht_c_index] = 0;
	/* F0x60[6:4] = NodeCnt; extra bits live at 0x160 for >8 nodes. */
1180 	sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1;
1181 #if CONFIG_MAX_PHYSICAL_CPUS > 8
1182 	sysconf.nodes += (((pci_read_config32(dev, 0x160)>>4) & 7)<<3);
1185 	sysconf.enabled_apic_ext_id = 0;
1186 	sysconf.lift_bsp_apicid = 0;
1188 	/* Find the bootstrap processors apicid */
1189 	sysconf.bsp_apicid = lapicid();
1190 	sysconf.apicid_offset = sysconf.bsp_apicid;
1192 #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
1193 	if (pci_read_config32(dev, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
1195 		sysconf.enabled_apic_ext_id = 1;
1197 #if (CONFIG_APIC_ID_OFFSET>0)
1198 	if(sysconf.enabled_apic_ext_id) {
1199 		if(sysconf.bsp_apicid == 0) {
1200 			/* bsp apic id is not changed */
1201 			sysconf.apicid_offset = CONFIG_APIC_ID_OFFSET;
1203 			sysconf.lift_bsp_apicid = 1;
/* Extend dev's link list so it has 'total_links' links, allocating and
 * zeroing the additional struct bus entries and chaining them on. */
1210 static void add_more_links(device_t dev, unsigned total_links)
1212 	struct bus *link, *last = NULL;
1215 	for (link = dev->link_list; link; link = link->next)
	/* Existing links: only allocate the difference. */
1219 		int links = total_links - last->link_num;
1220 		link_num = last->link_num;
1222 			link = malloc(links*sizeof(*link));
1224 				die("Couldn't allocate more links!\n");
1225 			memset(link, 0, links*sizeof(*link));
	/* No links at all yet: allocate the full set. */
1231 		link = malloc(total_links*sizeof(*link));
1232 		memset(link, 0, total_links*sizeof(*link));
1233 		dev->link_list = link;
1236 	for (link_num = link_num + 1; link_num < total_links; link_num++) {
1237 		link->link_num = link_num;
1239 		link->next = link + 1;
/* scan_bus() hook for the APIC cluster device.
 *
 * Relocates the per-node northbridge PCI devices (CONFIG_CDB + node on
 * bus 0) onto bus CONFIG_CBB, probes each node's PCI functions, reads
 * the core count from function 3 register 0xE8, and allocates/enables
 * one DEVICE_PATH_APIC device per detected core.  Cores belonging to
 * absent/disabled nodes are disabled instead.
 *
 * @param dev  the APIC cluster device whose link_list receives the cpus
 * @param max  maximum bus number seen so far (passed through by scan ops)
 */
1246 static u32 cpu_bus_scan(device_t dev, u32 max)
1248 struct bus *cpu_bus;
1251 device_t pci_domain;
1258 int disable_siblings;
1259 unsigned ApicIdCoreIdSize;
/* CPUID Fn8000_0008 ECX[15:12] (ApicIdCoreIdSize): number of APIC-id
 * bits reserved for cores within a node. */
1262 ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
1263 if(ApicIdCoreIdSize) {
1264 siblings = (1<<ApicIdCoreIdSize)-1;
/* Fallback when ApicIdCoreIdSize reads 0: assume quad core. */
1266 siblings = 3; //quad core
/* Honor the "multi_core" CMOS option only when logical CPUs are built in. */
1269 disable_siblings = !CONFIG_LOGICAL_CPUS;
1270 #if CONFIG_LOGICAL_CPUS == 1
1271 get_option(&disable_siblings, "multi_core");
1274 // NOTE(review): nb_cfg_54 is read on the BSP only; this assumes every
1274 // node is configured identically -- TODO confirm for mixed setups.
1275 nb_cfg_54 = read_nb_cfg_54();
/* First look for node 0's NB device where romstage left it: bus 0,
 * device CONFIG_CDB (usually 0x18). */
1278 dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
1279 if(dev_mc && dev_mc->bus) {
1280 printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
1281 pci_domain = dev_mc->bus->dev;
1282 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1283 printk(BIOS_DEBUG, "\n%s move to ",dev_path(dev_mc));
/* Re-home the whole bus so the NB devices live on bus CONFIG_CBB. */
1284 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1285 printk(BIOS_DEBUG, "%s",dev_path(dev_mc));
1288 printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
1290 printk(BIOS_DEBUG, "\n");
/* Re-lookup after the move, now on bus CONFIG_CBB. */
1292 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
/* Fallback: devicetree.cb may still list the NB at 00:18.0; rewrite the
 * devfn of it and its siblings so they match CONFIG_CDB/CONFIG_CBB. */
1294 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
1295 if (dev_mc && dev_mc->bus) {
1296 printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
1297 pci_domain = dev_mc->bus->dev;
1298 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1299 if((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
1300 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1301 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1302 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1304 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
/* Shift devfn down by 0x18 so device numbers restart at CONFIG_CDB. */
1305 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
1306 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1307 dev_mc = dev_mc->sibling;
1316 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1318 printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
/* Populate the global sysconf from the (relocated) node-0 NB device. */
1322 sysconf_init(dev_mc);
1324 nodes = sysconf.nodes;
/* With more than 32 nodes a second domain link is needed so nodes
 * 32..63 can live on bus CONFIG_CBB - 1 (0xfe). */
1326 #if CONFIG_CBB && (NODE_NUMS > 32)
1327 if(nodes>32) { // need to put node 32 to node 63 to bus 0xfe
1328 if(pci_domain->link_list && !pci_domain->link_list->next) {
1329 struct bus *new_link = new_link(pci_domain);
1330 pci_domain->link_list->next = new_link;
1331 new_link->link_num = 1;
1332 new_link->dev = pci_domain;
1333 new_link->children = 0;
1334 printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
1336 pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
1339 /* Find which cpus are present */
1340 cpu_bus = dev->link_list;
1341 for(i = 0; i < nodes; i++) {
1342 device_t cdb_dev, cpu;
1343 struct device_path cpu_path;
1344 unsigned busn, devn;
1348 devn = CONFIG_CDB+i;
1350 #if CONFIG_CBB && (NODE_NUMS > 32)
1354 pbus = pci_domain->link_list->next);
1358 /* Find the cpu's pci device */
1359 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1361 /* If I am probing things in a weird order
1362 * ensure all of the cpu's pci devices are found.
/* Probe functions 0..5 of this node so later lookups can't miss. */
1365 for(fn = 0; fn <= 5; fn++) { //FBDIMM?
1366 cdb_dev = pci_probe_dev(NULL, pbus,
1367 PCI_DEVFN(devn, fn));
1369 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1372 /* Ok, We need to set the links for that device.
1373 * otherwise the device under it will not be scanned
1376 #if CONFIG_HT3_SUPPORT==1
1381 add_more_links(cdb_dev, linknum);
/* Read the core count for this node from NB function 3 reg 0xE8
 * (Northbridge Capabilities): bits [13:12] plus an extra bit at 15. */
1384 cores_found = 0; // one core
1385 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1386 if (cdb_dev && cdb_dev->enabled) {
1387 j = pci_read_config32(cdb_dev, 0xe8);
1388 cores_found = (j >> 12) & 3; // dev is func 3
1390 cores_found |= (j >> 13) & 4;
1391 printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(cdb_dev), cores_found);
1395 if(disable_siblings) {
/* Create/enable one APIC device per core of this node. */
1402 for (j = 0; j <=jj; j++ ) {
1404 /* Build the cpu device path */
1405 cpu_path.type = DEVICE_PATH_APIC;
/* APIC-id layout depends on NbCfg[54]: either node-major (node *
 * (siblings+1) + core) or core-major with a stride of 64. */
1406 cpu_path.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:64); // ?
1408 /* See if I can find the cpu */
1409 cpu = find_dev_path(cpu_bus, &cpu_path);
1411 /* Enable the cpu if I have the processor */
1412 if (cdb_dev && cdb_dev->enabled) {
1414 cpu = alloc_dev(cpu_bus, &cpu_path);
1421 /* Disable the cpu if I don't have the processor */
1422 if (cpu && (!cdb_dev || !cdb_dev->enabled)) {
1426 /* Report what I have done */
1428 cpu->path.apic.node_id = i;
1429 cpu->path.apic.core_id = j;
/* Optionally lift APIC ids by the configured offset; the BSP (id 0)
 * is only lifted when lift_bsp_apicid is set. */
1430 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET>0)
1431 if(sysconf.enabled_apic_ext_id) {
1432 if(sysconf.lift_bsp_apicid) {
1433 cpu->path.apic.apic_id += sysconf.apicid_offset;
1436 if (cpu->path.apic.apic_id != 0)
1437 cpu->path.apic.apic_id += sysconf.apicid_offset;
1441 printk(BIOS_DEBUG, "CPU: %s %s\n",
1442 dev_path(cpu), cpu->enabled?"enabled":"disabled");
/* init() hook for the APIC cluster: brings up all cpus found by
 * cpu_bus_scan(), then (on CIMX SB800/SB900 boards) runs the
 * southbridge's post-PCI-init callback. */
1450 static void cpu_bus_init(device_t dev)
1452 initialize_cpus(dev->link_list);
1453 #if CONFIG_SOUTHBRIDGE_AMD_CIMX_SB800 || CONFIG_SOUTHBRIDGE_AMD_CIMX_SB900
1454 sb_After_Pci_Init();
/* Empty stand-in used for device_operations slots (enable_resources)
 * that need no work on the cpu cluster. */
1459 static void cpu_bus_noop(device_t dev)
/* read_resources() hook for the cpu cluster; body elided in this view —
 * presumably registers the MMCONF resource consumed by
 * cpu_bus_set_resources() below (index 0xc0010058) — TODO confirm. */
1463 static void cpu_bus_read_resources(device_t dev)
/* set_resources() hook: reports the MMCONF resource (index 0xc0010058,
 * the MSR address of the AMD MMIO config base) if present, then falls
 * through to the generic PCI resource setter. */
1467 static void cpu_bus_set_resources(device_t dev)
1469 struct resource *resource = find_resource(dev, 0xc0010058);
1471 report_resource_stored(dev, resource, " <mmconfig>");
1473 pci_dev_set_resources(dev);
/* device_operations for the APIC cluster pseudo-device; wired up by
 * root_complex_enable_dev() below. */
1476 static struct device_operations cpu_bus_ops = {
1477 .read_resources = cpu_bus_read_resources,
1478 .set_resources = cpu_bus_set_resources,
1479 .enable_resources = cpu_bus_noop,
1480 .init = cpu_bus_init,
1481 .scan_bus = cpu_bus_scan,
/* enable_dev hook for the root-complex chip: installs the proper
 * device_operations based on the device's path type (PCI domain vs.
 * APIC cluster).  pci_domain_ops is defined earlier in this file. */
1484 static void root_complex_enable_dev(struct device *dev)
1486 /* Set the operations if it is a special bus type */
1487 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1488 dev->ops = &pci_domain_ops;
1490 else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1491 dev->ops = &cpu_bus_ops;
/* Public chip_operations entry for this root complex; the devicetree
 * machinery calls enable_dev for every device under this chip. */
1495 struct chip_operations northbridge_amd_amdfam10_root_complex_ops = {
1496 CHIP_NAME("AMD FAM10 Root Complex")
1497 .enable_dev = root_complex_enable_dev,